Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/11609/head
Alexander Alekhin 7 years ago
commit 0f298a4203
  1. 3
      apps/interactive-calibration/frameProcessor.cpp
  2. 251
      doc/tutorials/imgproc/histograms/back_projection/back_projection.markdown
  3. 246
      doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.markdown
  4. 188
      doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.markdown
  5. 126
      doc/tutorials/imgproc/histograms/histogram_equalization/histogram_equalization.markdown
  6. 209
      doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.markdown
  7. 10
      doc/tutorials/imgproc/table_of_content_imgproc.markdown
  8. 44
      modules/calib3d/src/calibinit.cpp
  9. 2
      modules/calib3d/src/calibration.cpp
  10. 8
      modules/core/include/opencv2/core/cvdef.h
  11. 2
      modules/core/include/opencv2/core/private.cuda.hpp
  12. 2
      modules/dnn/include/opencv2/dnn/all_layers.hpp
  13. 4
      modules/dnn/misc/python/pyopencv_dnn.hpp
  14. 6
      modules/dnn/src/dnn.cpp
  15. 2
      modules/dnn/src/layers/batch_norm_layer.cpp
  16. 2
      modules/dnn/src/layers/convolution_layer.cpp
  17. 4
      modules/dnn/src/layers/fully_connected_layer.cpp
  18. 2
      modules/dnn/src/layers/normalize_bbox_layer.cpp
  19. 3
      modules/dnn/src/layers/recurrent_layers.cpp
  20. 79
      modules/dnn/src/layers/scale_layer.cpp
  21. 145
      modules/dnn/src/layers/shift_layer.cpp
  22. 2
      modules/dnn/src/ocl4dnn/src/math_functions.cpp
  23. 34
      modules/dnn/src/tensorflow/tf_importer.cpp
  24. 7
      modules/dnn/test/test_halide_layers.cpp
  25. 3
      modules/dnn/test/test_tf_importer.cpp
  26. 2
      modules/highgui/include/opencv2/highgui/highgui_c.h
  27. 9
      modules/imgproc/include/opencv2/imgproc.hpp
  28. 26
      modules/imgproc/perf/perf_houghcircles.cpp
  29. 43
      modules/imgproc/perf/perf_houghlines.cpp
  30. 3
      modules/imgproc/src/color.hpp
  31. 18
      modules/imgproc/src/filter.cpp
  32. 169
      modules/imgproc/src/hough.cpp
  33. 75
      modules/imgproc/test/test_filter.cpp
  34. 49
      modules/imgproc/test/test_houghcircles.cpp
  35. 86
      modules/imgproc/test/test_houghlines.cpp
  36. 16
      modules/imgproc/test/test_intersection.cpp
  37. 2
      modules/photo/src/merge.cpp
  38. 41
      modules/videoio/src/cap.cpp
  39. 3
      modules/videoio/src/cap_gstreamer.cpp
  40. 179
      modules/videoio/src/cap_msmf.cpp
  41. 10
      modules/videoio/src/cap_openni.cpp
  42. 8
      modules/videoio/src/precomp.hpp
  43. 32
      samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
  44. 64
      samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp
  45. 31
      samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp
  46. 57
      samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
  47. 68
      samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
  48. 76
      samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
  49. 59
      samples/dnn/tf_text_graph_ssd.py
  50. 173
      samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java
  51. 189
      samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java
  52. 99
      samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
  53. 91
      samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
  54. 49
      samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
  55. 24
      samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
  56. 24
      samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java
  57. 94
      samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java
  58. 80
      samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
  59. 71
      samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py
  60. 79
      samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py
  61. 71
      samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
  62. 69
      samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
  63. 31
      samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
  64. 54
      samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py

@@ -395,7 +395,8 @@ ShowProcessor::ShowProcessor(cv::Ptr<calibrationData> data, cv::Ptr<calibControl
cv::Mat ShowProcessor::processFrame(const cv::Mat &frame)
{
if(mCalibdata->cameraMatrix.size[0] && mCalibdata->distCoeffs.size[0]) {
if (!mCalibdata->cameraMatrix.empty() && !mCalibdata->distCoeffs.empty())
{
mTextSize = VIDEO_TEXT_SIZE * (double) frame.cols / IMAGE_MAX_WIDTH;
cv::Scalar textColor = cv::Scalar(0,0,255);
cv::Mat frameCopy;

@@ -67,46 +67,104 @@ Code
- Calculate the histogram (and update it if the bins change) and the backprojection of the
same image.
- Display the backprojection and the histogram in windows.
- **Downloadable code**:
-# Click
@add_toggle_cpp
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
for the basic version (explained in this tutorial).
-# For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
-# ...or you can always check out the classical
- ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
- ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java
@end_toggle
@add_toggle_python
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
- ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py
@end_toggle
Explanation
-----------
-# Declare the matrices to store our images and initialize the number of bins to be used by our
histogram:
@code{.cpp}
Mat src; Mat hsv; Mat hue;
int bins = 25;
@endcode
-# Read the input image and transform it to HSV format:
@code{.cpp}
src = imread( argv[1], 1 );
cvtColor( src, hsv, COLOR_BGR2HSV );
@endcode
-# For this tutorial, we will use only the Hue value for our 1-D histogram (check out the fancier
- Read the input image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Read the image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Read the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Read the image
@end_toggle
- Transform it to HSV format:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Transform it to HSV
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Transform it to HSV
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Transform it to HSV
@end_toggle
- For this tutorial, we will use only the Hue value for our 1-D histogram (check out the fancier
code in the links above if you want to use the more standard H-S histogram, which yields better
results):
@code{.cpp}
hue.create( hsv.size(), hsv.depth() );
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
@endcode
as you see, we use the function @ref cv::mixChannels to get only the channel 0 (Hue) from
the hsv image. It gets the following parameters:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Use only the Hue value
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Use only the Hue value
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Use only the Hue value
@end_toggle
- as you see, we use the function @ref cv::mixChannels to get only the channel 0 (Hue) from
the hsv image. It gets the following parameters:
- **&hsv:** The source array from which the channels will be copied
- **1:** The number of source arrays
- **&hue:** The destination array of the copied channels
@@ -115,59 +173,108 @@ Explanation
case, the Hue(0) channel of &hsv is being copied to the 0 channel of &hue (1-channel)
- **1:** Number of index pairs
-# Create a Trackbar for the user to enter the bin values. Any change on the Trackbar means a call
- Create a Trackbar for the user to enter the bin values. Any change on the Trackbar means a call
to the **Hist_and_Backproj** callback function.
@code{.cpp}
char* window_image = "Source image";
namedWindow( window_image, WINDOW_AUTOSIZE );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0);
@endcode
-# Show the image and wait for the user to exit the program:
@code{.cpp}
imshow( window_image, src );
waitKey(0);
return 0;
@endcode
-# **Hist_and_Backproj function:** Initialize the arguments needed for @ref cv::calcHist . The
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Create Trackbar to enter the number of bins
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Create Trackbar to enter the number of bins
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Create Trackbar to enter the number of bins
@end_toggle
- Show the image and wait for the user to exit the program:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Show the image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Show the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Show the image
@end_toggle
- **Hist_and_Backproj function:** Initialize the arguments needed for @ref cv::calcHist . The
number of bins comes from the Trackbar:
@code{.cpp}
void Hist_and_Backproj(int, void* )
{
MatND hist;
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
@endcode
-# Calculate the Histogram and normalize it to the range \f$[0,255]\f$
@code{.cpp}
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false );
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
@endcode
-# Get the Backprojection of the same image by calling the function @ref cv::calcBackProject
@code{.cpp}
MatND backproj;
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
@endcode
all the arguments are known (the same as used to calculate the histogram), only we add the
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp initialize
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java initialize
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py initialize
@end_toggle
- Calculate the Histogram and normalize it to the range \f$[0,255]\f$
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Get the Histogram and normalize it
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Get the Histogram and normalize it
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Get the Histogram and normalize it
@end_toggle
- Get the Backprojection of the same image by calling the function @ref cv::calcBackProject
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Get Backprojection
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Get Backprojection
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Get Backprojection
@end_toggle
- all the arguments are known (the same as used to calculate the histogram), only we add the
backproj matrix, which will store the backprojection of the source image (&hue)
-# Display backproj:
@code{.cpp}
imshow( "BackProj", backproj );
@endcode
-# Draw the 1-D Hue histogram of the image:
@code{.cpp}
int w = 400; int h = 400;
int bin_w = cvRound( (double) w / histSize );
Mat histImg = Mat::zeros( w, h, CV_8UC3 );
for( int i = 0; i < bins; i ++ )
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); }
imshow( "Histogram", histImg );
@endcode
- Display backproj:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Draw the backproj
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Draw the backproj
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Draw the backproj
@end_toggle
- Draw the 1-D Hue histogram of the image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Draw the histogram
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Draw the histogram
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Draw the histogram
@end_toggle
Results
-------

@@ -17,7 +17,8 @@ histogram called *Image histogram*. Now we will considerate it in its more gener
- Histograms are collected *counts* of data organized into a set of predefined *bins*
- When we say *data* we are not restricting it to be intensity values (as we saw in the previous
Tutorial). The data collected can be whatever feature you find useful to describe your image.
Tutorial @ref tutorial_histogram_equalization). The data collected can be whatever feature you find
useful to describe your image.
- Let's see an example. Imagine that a Matrix contains information of an image (i.e. intensity in
the range \f$0-255\f$):
@@ -65,64 +66,126 @@ Code
- Splits the image into its R, G and B planes using the function @ref cv::split
- Calculate the Histogram of each 1-channel plane by calling the function @ref cv::calcHist
- Plot the three histograms in a window
@add_toggle_cpp
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
@end_toggle
Explanation
-----------
-# Create the necessary matrices:
@code{.cpp}
Mat src, dst;
@endcode
-# Load the source image
@code{.cpp}
src = imread( argv[1], 1 );
- Load the source image
if( !src.data )
{ return -1; }
@endcode
-# Separate the source image in its three R,G and B planes. For this we use the OpenCV function
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Load image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Load image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Load image
@end_toggle
- Separate the source image in its three R,G and B planes. For this we use the OpenCV function
@ref cv::split :
@code{.cpp}
vector<Mat> bgr_planes;
split( src, bgr_planes );
@endcode
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Separate the image in 3 places ( B, G and R )
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Separate the image in 3 places ( B, G and R )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Separate the image in 3 places ( B, G and R )
@end_toggle
our input is the image to be divided (in this case with three channels) and the output is a vector
of Mat
-# Now we are ready to start configuring the **histograms** for each plane. Since we are working
- Now we are ready to start configuring the **histograms** for each plane. Since we are working
with the B, G and R planes, we know that our values will range in the interval \f$[0,255]\f$
-# Establish number of bins (5, 10...):
@code{.cpp}
int histSize = 256; //from 0 to 255
@endcode
-# Set the range of values (as we said, between 0 and 255 )
@code{.cpp}
/// Set the ranges ( for B,G,R) )
float range[] = { 0, 256 } ; //the upper boundary is exclusive
const float* histRange = { range };
@endcode
-# We want our bins to have the same size (uniform) and to clear the histograms in the
- Establish the number of bins (5, 10...):
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Establish the number of bins
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Establish the number of bins
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Establish the number of bins
@end_toggle
- Set the range of values (as we said, between 0 and 255 )
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Set the ranges ( for B,G,R) )
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Set the ranges ( for B,G,R) )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Set the ranges ( for B,G,R) )
@end_toggle
- We want our bins to have the same size (uniform) and to clear the histograms in the
beginning, so:
@code{.cpp}
bool uniform = true; bool accumulate = false;
@endcode
-# Finally, we create the Mat objects to save our histograms. Creating 3 (one for each plane):
@code{.cpp}
Mat b_hist, g_hist, r_hist;
@endcode
-# We proceed to calculate the histograms by using the OpenCV function @ref cv::calcHist :
@code{.cpp}
/// Compute the histograms:
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
@endcode
where the arguments are:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Set histogram param
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Set histogram param
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Set histogram param
@end_toggle
- We proceed to calculate the histograms by using the OpenCV function @ref cv::calcHist :
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Compute the histograms
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Compute the histograms
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Compute the histograms
@end_toggle
- where the arguments are (**C++ code**):
- **&bgr_planes[0]:** The source array(s)
- **1**: The number of source arrays (in this case we are using 1. We can enter here also
a list of arrays )
@@ -137,50 +200,59 @@ Explanation
- **uniform** and **accumulate**: The bin sizes are the same and the histogram is cleared
at the beginning.
-# Create an image to display the histograms:
@code{.cpp}
// Draw the histograms for R, G and B
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
- Create an image to display the histograms:
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
@endcode
-# Notice that before drawing, we first @ref cv::normalize the histogram so its values fall in the
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Draw the histograms for B, G and R
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Draw the histograms for B, G and R
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Draw the histograms for B, G and R
@end_toggle
- Notice that before drawing, we first @ref cv::normalize the histogram so its values fall in the
range indicated by the parameters entered:
@code{.cpp}
/// Normalize the result to [ 0, histImage.rows ]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
@endcode
this function receives these arguments:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Normalize the result to ( 0, histImage.rows )
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Normalize the result to ( 0, histImage.rows )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Normalize the result to ( 0, histImage.rows )
@end_toggle
- this function receives these arguments (**C++ code**):
- **b_hist:** Input array
- **b_hist:** Output normalized array (can be the same)
- **0** and **histImage.rows**: For this example, they are the lower and upper limits to
normalize the values of **r_hist**
- **0** and **histImage.rows**: For this example, they are the lower and upper limits to
normalize the values of **r_hist**
- **NORM_MINMAX:** Argument that indicates the type of normalization (as described above, it
adjusts the values between the two limits set before)
- **-1:** Implies that the output normalized array will be the same type as the input
- **Mat():** Optional mask
-# Finally, observe that to access the bin (in this case in this 1D-Histogram):
@code{.cpp}
/// Draw for each channel
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
Scalar( 255, 0, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
Scalar( 0, 255, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
Scalar( 0, 0, 255), 2, 8, 0 );
}
@endcode
we use the expression:
- Observe that to access the bin (in this case in this 1D-Histogram):
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Draw for each channel
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Draw for each channel
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Draw for each channel
@end_toggle
we use the expression (**C++ code**):
@code{.cpp}
b_hist.at<float>(i)
@endcode
@@ -189,20 +261,24 @@ Explanation
b_hist.at<float>( i, j )
@endcode
-# Finally we display our histograms and wait for the user to exit:
@code{.cpp}
namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
imshow("calcHist Demo", histImage );
- Finally we display our histograms and wait for the user to exit:
waitKey(0);
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Display
@end_toggle
return 0;
@endcode
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Display
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Display
@end_toggle
Result
------
-# Using as input argument an image like the shown below:
-# Using as input argument an image like the one shown below:
![](images/Histogram_Calculation_Original_Image.jpg)

@@ -43,90 +43,118 @@ Code
- Compare the histogram of the *base image* with respect to the 2 test histograms, the
histogram of the lower half base image and with the same base image histogram.
- Display the numerical matching parameters obtained.
@add_toggle_cpp
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
@include cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
@end_toggle
Explanation
-----------
-# Declare variables such as the matrices to store the base image and the two other images to
compare ( BGR and HSV )
@code{.cpp}
Mat src_base, hsv_base;
Mat src_test1, hsv_test1;
Mat src_test2, hsv_test2;
Mat hsv_half_down;
@endcode
-# Load the base image (src_base) and the other two test images:
@code{.cpp}
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 );
src_test2 = imread( argv[3], 1 );
@endcode
-# Convert them to HSV format:
@code{.cpp}
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
@endcode
-# Also, create an image of half the base image (in HSV format):
@code{.cpp}
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
@endcode
-# Initialize the arguments to calculate the histograms (bins, ranges and channels H and S ).
@code{.cpp}
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
int channels[] = { 0, 1 };
@endcode
-# Create the MatND objects to store the histograms:
@code{.cpp}
MatND hist_base;
MatND hist_half_down;
MatND hist_test1;
MatND hist_test2;
@endcode
-# Calculate the Histograms for the base image, the 2 test images and the half-down base image:
@code{.cpp}
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
@endcode
-# Apply sequentially the 4 comparison methods between the histogram of the base image (hist_base)
- Load the base image (src_base) and the other two test images:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Load three images with different environment settings
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Load three images with different environment settings
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Load three images with different environment settings
@end_toggle
- Convert them to HSV format:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Convert to HSV
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Convert to HSV
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Convert to HSV
@end_toggle
- Also, create an image of half the base image (in HSV format):
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Convert to HSV half
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Convert to HSV half
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Convert to HSV half
@end_toggle
- Initialize the arguments to calculate the histograms (bins, ranges and channels H and S ).
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Using 50 bins for hue and 60 for saturation
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Using 50 bins for hue and 60 for saturation
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Using 50 bins for hue and 60 for saturation
@end_toggle
- Calculate the Histograms for the base image, the 2 test images and the half-down base image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Calculate the histograms for the HSV images
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Calculate the histograms for the HSV images
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Calculate the histograms for the HSV images
@end_toggle
- Apply sequentially the 4 comparison methods between the histogram of the base image (hist_base)
and the other histograms:
@code{.cpp}
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
@endcode
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Apply the histogram comparison methods
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Apply the histogram comparison methods
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Apply the histogram comparison methods
@end_toggle
Results
-------
@ -144,13 +172,13 @@ Results
are from the same source. For the other two test images, we can observe that they have very
different lighting conditions, so the matching should not be very good:
-# Here the numeric results:
-# Here the numeric results we got with OpenCV 3.4.1:
*Method* | Base - Base | Base - Half | Base - Test 1 | Base - Test 2
----------------- | ------------ | ------------ | -------------- | ---------------
*Correlation* | 1.000000 | 0.930766 | 0.182073 | 0.120447
*Chi-square* | 0.000000 | 4.940466 | 21.184536 | 49.273437
*Intersection* | 24.391548 | 14.959809 | 3.889029 | 5.775088
*Bhattacharyya* | 0.000000 | 0.222609 | 0.646576 | 0.801869
*Correlation* | 1.000000 | 0.880438 | 0.20457 | 0.0664547
*Chi-square* | 0.000000 | 4.6834 | 2697.98 | 4763.8
*Intersection* | 18.8947 | 13.022 | 5.44085 | 2.58173
*Bhattacharyya* | 0.000000 | 0.237887 | 0.679826 | 0.874173
For the *Correlation* and *Intersection* methods, the higher the metric, the more accurate the
match. As we can see, the match *base-base* is the highest of all as expected. Also we can observe
that the match *base-half* is the second best match (as we predicted). For the other two metrics,

@ -22,7 +22,7 @@ Theory
### What is Histogram Equalization?
- It is a method that improves the contrast in an image, in order to stretch out the intensity
range.
range (see also the corresponding <a href="https://en.wikipedia.org/wiki/Histogram_equalization">Wikipedia entry</a>).
- To make it clearer, from the image above, you can see that the pixels seem clustered around the
middle of the available range of intensities. What Histogram Equalization does is to *stretch
out* this range. Take a look at the figure below: The green circles indicate the
@ -61,53 +61,105 @@ Code
- Convert the original image to grayscale
- Equalize the Histogram by using the OpenCV function @ref cv::equalizeHist
- Display the source and equalized images in a window.
@add_toggle_cpp
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
@end_toggle
Explanation
-----------
-# Declare the source and destination images as well as the windows names:
@code{.cpp}
Mat src, dst;
char* source_window = "Source image";
char* equalized_window = "Equalized Image";
@endcode
-# Load the source image:
@code{.cpp}
src = imread( argv[1], 1 );
if( !src.data )
{ cout<<"Usage: ./Histogram_Demo <path_to_image>"<<endl;
return -1;}
@endcode
-# Convert it to grayscale:
@code{.cpp}
cvtColor( src, src, COLOR_BGR2GRAY );
@endcode
-# Apply histogram equalization with the function @ref cv::equalizeHist :
@code{.cpp}
equalizeHist( src, dst );
@endcode
- Load the source image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Load image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Load image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Load image
@end_toggle
- Convert it to grayscale:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Convert to grayscale
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Convert to grayscale
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Convert to grayscale
@end_toggle
- Apply histogram equalization with the function @ref cv::equalizeHist :
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Apply Histogram Equalization
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Apply Histogram Equalization
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Apply Histogram Equalization
@end_toggle
As it can be easily seen, the only arguments are the original image and the output (equalized)
image.
-# Display both images (original and equalized) :
@code{.cpp}
namedWindow( source_window, WINDOW_AUTOSIZE );
namedWindow( equalized_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );
@endcode
-# Wait until user exits the program
@code{.cpp}
waitKey(0);
return 0;
@endcode
- Display both images (original and equalized):
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Display results
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Display results
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Display results
@end_toggle
- Wait until user exits the program
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Wait until user exits the program
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Wait until user exits the program
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Wait until user exits the program
@end_toggle
Results
-------

@ -80,7 +80,7 @@ Theory
Code
----
-# **What does this program do?**
- **What does this program do?**
- Loads an image
- Applies an Affine Transform to the image. This transform is obtained from the relation
between three points. We use the function @ref cv::warpAffine for that purpose.
@ -88,57 +88,88 @@ Code
the image center
- Waits until the user exits the program
-# The tutorial's code is shown below. You can also download it
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp)
@add_toggle_cpp
- The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
@end_toggle
@add_toggle_java
- The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java)
@include samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
@end_toggle
@add_toggle_python
- The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
@include samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py
@end_toggle
Explanation
-----------
-# Declare some variables we will use, such as the matrices to store our results and 2 arrays of
points to store the 2D points that define our Affine Transform.
@code{.cpp}
Point2f srcTri[3];
Point2f dstTri[3];
Mat rot_mat( 2, 3, CV_32FC1 );
Mat warp_mat( 2, 3, CV_32FC1 );
Mat src, warp_dst, warp_rotate_dst;
@endcode
-# Load an image:
@code{.cpp}
src = imread( argv[1], 1 );
@endcode
-# Initialize the destination image as having the same size and type as the source:
@code{.cpp}
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
@endcode
-# **Affine Transform:** As we explained in lines above, we need two sets of 3 points to derive the
- Load an image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Load the image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Load the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Load the image
@end_toggle
- **Affine Transform:** As we explained in lines above, we need two sets of 3 points to derive the
affine transform relation. Have a look:
@code{.cpp}
srcTri[0] = Point2f( 0, 0 );
srcTri[1] = Point2f( src.cols - 1, 0 );
srcTri[2] = Point2f( 0, src.rows - 1 );
dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 );
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 );
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 );
@endcode
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Set your 3 points to calculate the Affine Transform
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Set your 3 points to calculate the Affine Transform
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Set your 3 points to calculate the Affine Transform
@end_toggle
You may want to draw these points to get a better idea on how they change. Their locations are
approximately the same as the ones depicted in the example figure (in the Theory section). You
may note that the size and orientation of the triangle defined by the 3 points change.
-# Armed with both sets of points, we calculate the Affine Transform by using OpenCV function @ref
- Armed with both sets of points, we calculate the Affine Transform by using OpenCV function @ref
cv::getAffineTransform :
@code{.cpp}
warp_mat = getAffineTransform( srcTri, dstTri );
@endcode
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Get the Affine Transform
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Get the Affine Transform
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Get the Affine Transform
@end_toggle
We get a \f$2 \times 3\f$ matrix as an output (in this case **warp_mat**)
-# We then apply the Affine Transform just found to the src image
@code{.cpp}
warpAffine( src, warp_dst, warp_mat, warp_dst.size() );
@endcode
- We then apply the Affine Transform just found to the src image
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Apply the Affine Transform just found to the src image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Apply the Affine Transform just found to the src image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Apply the Affine Transform just found to the src image
@end_toggle
with the following arguments:
- **src**: Input image
@ -149,47 +180,87 @@ Explanation
We just got our first transformed image! We will display it in one bit. Before that, we also
want to rotate it...
-# **Rotate:** To rotate an image, we need to know two things:
- **Rotate:** To rotate an image, we need to know two things:
-# The center with respect to which the image will rotate
-# The angle to be rotated. In OpenCV a positive angle is counter-clockwise
-# *Optional:* A scale factor
We define these parameters with the following snippet:
@code{.cpp}
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
double angle = -50.0;
double scale = 0.6;
@endcode
-# We generate the rotation matrix with the OpenCV function @ref cv::getRotationMatrix2D , which
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Compute a rotation matrix with respect to the center of the image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Compute a rotation matrix with respect to the center of the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Compute a rotation matrix with respect to the center of the image
@end_toggle
- We generate the rotation matrix with the OpenCV function @ref cv::getRotationMatrix2D , which
returns a \f$2 \times 3\f$ matrix (in this case *rot_mat*)
@code{.cpp}
rot_mat = getRotationMatrix2D( center, angle, scale );
@endcode
-# We now apply the found rotation to the output of our previous Transformation.
@code{.cpp}
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
@endcode
-# Finally, we display our results in two windows plus the original image for good measure:
@code{.cpp}
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
namedWindow( warp_window, WINDOW_AUTOSIZE );
imshow( warp_window, warp_dst );
namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
imshow( warp_rotate_window, warp_rotate_dst );
@endcode
-# We just have to wait until the user exits the program
@code{.cpp}
waitKey(0);
@endcode
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Get the rotation matrix with the specifications above
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Get the rotation matrix with the specifications above
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Get the rotation matrix with the specifications above
@end_toggle
- We now apply the found rotation to the output of our previous Transformation:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Rotate the warped image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Rotate the warped image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Rotate the warped image
@end_toggle
- Finally, we display our results in two windows plus the original image for good measure:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Show what you got
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Show what you got
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Show what you got
@end_toggle
- We just have to wait until the user exits the program
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Wait until user exits the program
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Wait until user exits the program
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Wait until user exits the program
@end_toggle
Result
------
-# After compiling the code above, we can give it the path of an image as argument. For instance,
- After compiling the code above, we can give it the path of an image as argument. For instance,
for a picture like:
![](images/Warp_Affine_Tutorial_Original_Image.jpg)

@ -165,6 +165,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_warp_affine
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
@ -173,6 +175,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_equalization
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
@ -181,6 +185,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_calculation
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
@ -189,6 +195,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_comparison
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
@ -197,6 +205,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_back_projection
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán

@ -252,7 +252,7 @@ static int icvSmoothHistogram( const std::vector<int>& piHist, std::vector<int>&
for ( int ii=-iWidth; ii<=iWidth; ii++)
{
iIdx = i+ii;
if (iIdx > 0 && iIdx < 256)
if (iIdx >= 0 && iIdx < 256)
{
iSmooth += piHist[iIdx];
}
@ -293,7 +293,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
std::vector<int> piHistSmooth(iNumBins, 0);
std::vector<int> piHistGrad(iNumBins, 0);
std::vector<int> piAccumSum(iNumBins, 0);
std::vector<int> piMaxPos(20, 0);
std::vector<int> piMaxPos; piMaxPos.reserve(20);
int iThresh = 0;
int iIdx;
int iWidth = 1;
@ -319,7 +319,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
{
if ( (piHistGrad[i-1] < 0) && (piHistGrad[i] > 0) )
{
piMaxPos[iCntMaxima] = i;
piMaxPos.push_back(i);
iCntMaxima++;
}
}
@ -332,15 +332,35 @@ static bool icvBinarizationHistogramBased( Mat & img )
iSumAroundMax = piHistSmooth[iIdx-1] + piHistSmooth[iIdx] + piHistSmooth[iIdx+1];
if ( iSumAroundMax < iMaxPix1 && iIdx < 64 )
{
for ( int j=i; j<iCntMaxima-1; j++ )
{
piMaxPos[j] = piMaxPos[j+1];
}
piMaxPos.erase(piMaxPos.begin() + i);
iCntMaxima--;
i--;
}
}
if ( iCntMaxima == 1)
CV_Assert((size_t)iCntMaxima == piMaxPos.size());
PRINTF("HIST: MAXIMA COUNT: %d (%d, %d, %d, ...)\n", iCntMaxima,
iCntMaxima > 0 ? piMaxPos[0] : -1,
iCntMaxima > 1 ? piMaxPos[1] : -1,
iCntMaxima > 2 ? piMaxPos[2] : -1);
if (iCntMaxima == 0)
{
// no maxima inside the range (except 0 and 255, which are not handled above)
// Is the image already black-and-white?
const int iMaxPix2 = iMaxPix / 2;
for (int sum = 0, i = 0; i < 256; ++i) // select mean intensity
{
sum += piHistIntensity[i];
if (sum > iMaxPix2)
{
iThresh = i;
break;
}
}
}
else if (iCntMaxima == 1)
{
iThresh = piMaxPos[0]/2;
}
@ -380,7 +400,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
int iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]];
//IF TOO CLOSE TO 255, jump to next maximum
if ( piMaxPos[iIdxBGMax] >= 250 && iIdxBGMax < iCntMaxima )
if ( piMaxPos[iIdxBGMax] >= 250 && iIdxBGMax + 1 < iCntMaxima )
{
iIdxBGMax++;
iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]];
@ -497,7 +517,8 @@ int cvFindChessboardCorners( const void* arr, CvSize pattern_size,
int max_quad_buf_size = 0;
cvFree(&quads);
cvFree(&corners);
int quad_count = icvGenerateQuads( &quads, &corners, storage, thresh_img_new, flags, &max_quad_buf_size );
Mat binarized_img = thresh_img_new.clone(); // make clone because cvFindContours modifies the source image
int quad_count = icvGenerateQuads( &quads, &corners, storage, binarized_img, flags, &max_quad_buf_size );
PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("New quads", thresh_img_new, quads, quad_count);
if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size))
@ -562,7 +583,8 @@ int cvFindChessboardCorners( const void* arr, CvSize pattern_size,
int max_quad_buf_size = 0;
cvFree(&quads);
cvFree(&corners);
int quad_count = icvGenerateQuads( &quads, &corners, storage, thresh_img, flags, &max_quad_buf_size);
Mat binarized_img = (useAdaptive) ? thresh_img : thresh_img.clone(); // make clone because cvFindContours modifies the source image
int quad_count = icvGenerateQuads( &quads, &corners, storage, binarized_img, flags, &max_quad_buf_size);
PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("Old quads", thresh_img, quads, quad_count);
if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size))

@ -3889,7 +3889,7 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
P3.at<double>(0,3) *= P3.at<double>(0,0);
P3.at<double>(1,3) *= P3.at<double>(1,1);
if( !_imgpt1.empty() && _imgpt3.empty() )
if( !_imgpt1.empty() && !_imgpt3.empty() )
adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
_cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);

@ -435,7 +435,7 @@ Cv64suf;
// Integer types portability
#ifdef OPENCV_STDINT_HEADER
#include OPENCV_STDINT_HEADER
#else
#elif defined(__cplusplus)
#if defined(_MSC_VER) && _MSC_VER < 1600 /* MSVS 2010 */
namespace cv {
typedef signed char int8_t;
@ -472,9 +472,15 @@ typedef ::int64_t int64_t;
typedef ::uint64_t uint64_t;
}
#endif
#else // pure C
#include <stdint.h>
#endif
//! @}
#ifndef __cplusplus
#include "opencv2/core/fast_math.hpp" // define cvRound(double)
#endif
#endif // OPENCV_CORE_CVDEF_H

@ -152,7 +152,7 @@ namespace cv { namespace cuda
inline ~NppStreamHandler()
{
cudaStreamSynchronize(oldStream);
nppSafeSetStream(nppGetStream(), oldStream);
}
private:

@ -503,7 +503,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
class CV_EXPORTS ShiftLayer : public Layer
{
public:
static Ptr<ShiftLayer> create(const LayerParams& params);
static Ptr<Layer> create(const LayerParams& params);
};
class CV_EXPORTS PriorBoxLayer : public Layer

@ -142,7 +142,7 @@ public:
PyGILState_Release(gstate);
if (!res)
CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method");
pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0));
CV_Assert(pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)));
return false;
}
@ -163,7 +163,7 @@ public:
CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");
std::vector<Mat> pyOutputs;
pyopencv_to(res, pyOutputs, ArgInfo("", 0));
CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
CV_Assert(pyOutputs.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); ++i)

@ -1530,10 +1530,12 @@ struct Net::Impl
LayerData *eltwiseData = nextData;
// go down from the second input and find the first non-skipped layer.
LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[1].lid];
CV_Assert(downLayerData);
while (downLayerData->skip)
{
downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
}
CV_Assert(downLayerData);
// second input layer is current layer.
if ( ld.id == downLayerData->id )
@ -1548,9 +1550,7 @@ struct Net::Impl
downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
}
Ptr<ConvolutionLayer> convLayer;
if( downLayerData )
convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
Ptr<ConvolutionLayer> convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
// first input layer is convolution layer
if( !convLayer.empty() && eltwiseData->consumers.size() == 1 )

@ -283,7 +283,7 @@ public:
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
const int numChannels = weights_.total();
const size_t numChannels = weights_.total();
ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);

@ -456,7 +456,7 @@ public:
if (hasBias() || fusedBias)
{
Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {outCn}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE

@ -427,9 +427,9 @@ public:
std::shared_ptr<InferenceEngine::FullyConnectedLayer> ieLayer(new InferenceEngine::FullyConnectedLayer(lp));
ieLayer->_out_num = blobs[0].size[0];
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {blobs[0].size[0], blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
if (blobs.size() > 1)
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {ieLayer->_out_num}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();

@ -254,7 +254,7 @@ public:
ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
const int numChannels = blobs[0].total();
const size_t numChannels = blobs[0].total();
ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE

@ -119,9 +119,10 @@ public:
if (blobs.size() > 3)
{
CV_Assert(blobs.size() == 6);
const int N = Wh.cols;
for (int i = 3; i < 6; ++i)
{
CV_Assert(blobs[i].rows == Wh.cols && blobs[i].cols == Wh.cols);
CV_Assert(blobs[i].rows == N && blobs[i].cols == N);
CV_Assert(blobs[i].type() == bias.type());
}
}

@ -28,6 +28,7 @@ public:
setParamsFrom(params);
hasBias = params.get<bool>("bias_term", false);
axis = params.get<int>("axis", 1);
hasWeights = false;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -35,11 +36,16 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == 1 + hasBias);
outputs.assign(1, inputs[0]);
return true;
}
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
@ -63,10 +69,15 @@ public:
Mat &inpBlob = *inputs[0];
Mat &outBlob = outputs[0];
Mat &weights = blobs.empty() ? *inputs[1] : blobs[0];
Mat bias = hasBias ? blobs.back() : Mat();
// There is a mode when we multiply a first blob by a second one
// instead of trainable weights.
Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
if (!weights.empty())
weights = weights.reshape(1, 1);
MatShape inpShape = shape(inpBlob);
const int numWeights = weights.total();
const int numWeights = !weights.empty() ? weights.total() : bias.total();
CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total());
int endAxis;
for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
@ -84,15 +95,15 @@ public:
if (endAxis != inpBlob.dims)
{
float* weightsData = (float*)weights.data;
float* weightsData = !weights.empty() ? (float*)weights.data : 0;
float* biasesData = hasBias ? (float*)bias.data : 0;
int spatialSize = total(inpShape, endAxis); // spatialSize != 1
for (int i = 0; i < numSlices; ++i)
{
for (int j = 0; j < numWeights; ++j)
{
float w = weightsData[j];
float b = hasBias ? biasesData[j] : 0;
float w = weightsData ? weightsData[j] : 1;
float b = biasesData ? biasesData[j] : 0;
Mat inpSlice(1, spatialSize, CV_32F, inpData);
Mat outSlice(1, spatialSize, CV_32F, outData);
inpSlice.convertTo(outSlice, CV_32F, w, b);
@ -105,12 +116,16 @@ public:
{
for (int i = 0; i < numSlices; ++i)
{
Mat inpSlice(weights.dims, weights.size, CV_32F, inpData);
Mat outSlice(weights.dims, weights.size, CV_32F, outData);
Mat inpSlice(1, numWeights, CV_32F, inpData);
Mat outSlice(1, numWeights, CV_32F, outData);
if (!weights.empty())
{
multiply(inpSlice, weights, outSlice);
if (hasBias)
add(outSlice, bias, outSlice);
}
else if (hasBias)
add(inpSlice, bias, outSlice);
inpData += numWeights;
outData += numWeights;
}
@ -157,11 +172,15 @@ public:
const int numChannels = blobs[0].total();
Halide::Expr topExpr = input;
if (hasWeights)
{
auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
Halide::Expr topExpr = input * weights(c);
topExpr *= weights(c);
}
if (hasBias)
{
auto bias = wrapToHalideBuffer(blobs[1], {numChannels});
auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
topExpr += bias(c);
}
top(x, y, c, n) = topExpr;
@ -178,10 +197,24 @@ public:
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
const int numChannels = blobs[0].total();
CV_Assert(!blobs.empty());
const size_t numChannels = blobs[0].total();
if (hasWeights)
{
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
}
else
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
{numChannels});
weights->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
ieLayer->_weights = weights;
}
if (hasBias)
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {numChannels}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
@ -190,8 +223,8 @@ public:
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = !blobs.empty() ? blobs[0] : Mat();
shift = hasBias ? blobs[1] : Mat();
scale = hasWeights ? blobs[0] : Mat();
shift = hasBias ? blobs.back() : Mat();
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
@ -205,6 +238,9 @@ public:
}
return flops;
}
private:
bool hasWeights;
};
@ -213,5 +249,16 @@ Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
}
// Backward-compatibility factory: a Shift layer is implemented as a Scale
// layer that has no multiplicative weights, only an additive bias term.
Ptr<Layer> ShiftLayer::create(const LayerParams& params)
{
    LayerParams scaleParams;
    scaleParams.name = params.name;
    scaleParams.type = "Scale";
    scaleParams.blobs = params.blobs;    // single blob: the per-channel shift values
    scaleParams.set("bias_term", true);  // interpret the blob as bias, not weights
    scaleParams.set("axis", 0);
    return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
}
} // namespace dnn
} // namespace cv

@ -1,145 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of shift layer, which adds up const values to blob.
*/
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
// Adds a constant blob (blobs[0]) to every input blob.
// NOTE(review): this standalone implementation is superseded by ScaleLayer
// with bias_term=true (see ShiftLayer::create in scale_layer.cpp).
class ShiftLayerImpl CV_FINAL : public ShiftLayer
{
public:
    ShiftLayerImpl(const LayerParams &params)
    {
        setParamsFrom(params);
        CV_Assert(blobs.size() == 1);  // exactly one blob: the shift values
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_DEFAULT ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        // One internal row of ones with total(inputs[0], 2) elements, used by
        // forward() as the broadcasting factor in the gemm call below.
        internals.assign(1, shape(1, total(inputs[0], 2)));
        return true;
    }

    // InputArray-based entry point: defer to the Mat* overload via the
    // generic fallback.
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
    }

    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_Assert(inputs.size() > 0);
        CV_Assert(blobs.size() > 0);

        if(inputs[0]->dims == blobs[0].dims)
        {
            // Shapes match: plain element-wise addition.
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                outBlob = inpBlob + blobs[0];
            }
        }
        else
        {
            // Broadcast the per-channel shift over the spatial dimensions:
            // out[n] += blobs[0] * onesRow, computed per sample via gemm.
            Mat biasOnesMat = internals[0];
            biasOnesMat.setTo(1);
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                inpBlob.copyTo(outBlob);

                for (int n = 0; n < inpBlob.size[0]; n++)
                {
                    // assumes a 4-D NCHW input here (size[2] * size[3]) — TODO confirm
                    Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
                               outBlob.type(), outBlob.ptr(n));
                    gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
                }
            }
        }
    }

    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        // Inference Engine has no layer just for biases. Create a linear
        // transformation layer with ones weights.
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "ScaleShift";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));

        // Identity weights (all ones) so the layer reduces to "input + bias".
        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                                {blobs[0].total()});
        weights->allocate();

        std::vector<float> ones(blobs[0].total(), 1);
        weights->set(ones);
        ieLayer->_weights = weights;

        ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);

        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }

    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
    {
        scale = Mat();     // no multiplicative component
        shift = blobs[0];  // the additive constant
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;

        // One addition per input element.
        for(int i= 0; i < inputs.size(); i++)
        {
            flops += total(inputs[i]);
        }

        return flops;
    }
};
// Factory for the legacy standalone Shift layer implementation.
Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
{
    return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
}
}
}

@ -504,7 +504,7 @@ static bool ocl4dnnFastBufferGEMM(const CBLAS_TRANSPOSE TransA,
oclk_gemm_float.set(arg_idx++, (float)alpha);
oclk_gemm_float.set(arg_idx++, (float)beta);
bool ret;
bool ret = true;
if (TransB == CblasNoTrans || TransA != CblasNoTrans) {
int stride = 256;
for (int start_index = 0; start_index < K; start_index += stride) {

@ -743,10 +743,20 @@ void TFImporter::populateNet(Net dstNet)
if (haveConst)
{
layerParams.blobs.resize(1);
blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
Mat values = getTensorContent(getConstBlob(layer, value_id));
CV_Assert(values.type() == CV_32FC1);
int id = dstNet.addLayer(name, "Shift", layerParams);
int id;
if (values.total() == 1) // is a scalar.
{
layerParams.set("shift", values.at<float>(0));
id = dstNet.addLayer(name, "Power", layerParams);
}
else // is a vector
{
layerParams.blobs.resize(1, values);
id = dstNet.addLayer(name, "Shift", layerParams);
}
layer_id[name] = id;
// one input only
@ -777,11 +787,21 @@ void TFImporter::populateNet(Net dstNet)
}
CV_Assert(haveConst);
layerParams.blobs.resize(1);
blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
layerParams.blobs[0] *= -1;
Mat values = getTensorContent(getConstBlob(layer, value_id));
CV_Assert(values.type() == CV_32FC1);
values *= -1.0f;
int id = dstNet.addLayer(name, "Shift", layerParams);
int id;
if (values.total() == 1) // is a scalar.
{
layerParams.set("shift", values.at<float>(0));
id = dstNet.addLayer(name, "Power", layerParams);
}
else // is a vector
{
layerParams.blobs.resize(1, values);
id = dstNet.addLayer(name, "Shift", layerParams);
}
layer_id[name] = id;
// one input only

@ -40,17 +40,18 @@ TEST(Padding_Halide, Accuracy)
{
static const int kNumRuns = 10;
std::vector<int> paddings(8);
cv::RNG& rng = cv::theRNG();
for (int t = 0; t < kNumRuns; ++t)
{
for (int i = 0; i < paddings.size(); ++i)
paddings[i] = rand() % 5;
paddings[i] = rng(5);
LayerParams lp;
lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
lp.type = "Padding";
lp.name = "testLayer";
Mat input({1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10}, CV_32F);
Mat input({1 + rng(10), 1 + rng(10), 1 + rng(10), 1 + rng(10)}, CV_32F);
test(lp, input);
}
}
@ -633,7 +634,7 @@ TEST_P(Eltwise, Accuracy)
eltwiseParam.set("operation", op);
if (op == "sum" && weighted)
{
RNG rng = cv::theRNG();
RNG& rng = cv::theRNG();
std::vector<float> coeff(1 + numConv);
for (int i = 0; i < coeff.size(); ++i)
{

@ -376,7 +376,8 @@ TEST(Test_TensorFlow, memory_read)
class ResizeBilinearLayer CV_FINAL : public Layer
{
public:
ResizeBilinearLayer(const LayerParams &params) : Layer(params)
ResizeBilinearLayer(const LayerParams &params) : Layer(params),
outWidth(0), outHeight(0), factorWidth(1), factorHeight(1)
{
CV_Assert(!params.get<bool>("align_corners", false));
CV_Assert(!blobs.empty());

@ -135,8 +135,10 @@ CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOS
CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);
CVAPI(double) cvGetWindowProperty(const char* name, int prop_id);
#ifdef __cplusplus // FIXIT remove in OpenCV 4.0
/* Get window image rectangle coordinates, width and height */
CVAPI(cv::Rect)cvGetWindowImageRect(const char* name);
#endif
/* display image within window (highgui windows remember their content) */
CVAPI(void) cvShowImage( const char* name, const CvArr* image );

@ -1977,10 +1977,11 @@ detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good ex
transform.
@param image 8-bit, single-channel binary source image. The image may be modified by the function.
@param lines Output vector of lines. Each line is represented by a two-element vector
\f$(\rho, \theta)\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
@param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
\f$(\rho, \theta)\f$ or \f$(\rho, \theta, \votes)\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
the image). \f$\theta\f$ is the line rotation angle in radians (
\f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
\f$\votes\f$ is the value of accumulator.
@param rho Distance resolution of the accumulator in pixels.
@param theta Angle resolution of the accumulator in radians.
@param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@ -2155,8 +2156,8 @@ you know it. Or, you may set maxRadius to a negative number to return centers on
search, and find the correct radius using an additional procedure.
@param image 8-bit, single-channel, grayscale input image.
@param circles Output vector of found circles. Each vector is encoded as a 3-element
floating-point vector \f$(x, y, radius)\f$ .
@param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
floating-point vector \f$(x, y, radius)\f$ or \f$(x, y, radius, votes)\f$ .
@param method Detection method, see #HoughModes. Currently, the only implemented method is #HOUGH_GRADIENT
@param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has

@ -56,4 +56,30 @@ PERF_TEST(PerfHoughCircles2, ManySmallCircles)
SANITY_CHECK_NOTHING();
}
// Same scenario as the basic HoughCircles perf test, but requests Vec4f
// output so HoughCircles also returns the accumulator votes as the 4th
// component.
PERF_TEST(PerfHoughCircles4f, Basic)
{
    string filename = getDataPath("cv/imgproc/stuff.jpg");
    const double dp = 1.0;
    double minDist = 20;
    double edgeThreshold = 20;
    double accumThreshold = 30;
    int minRadius = 20;
    int maxRadius = 200;

    Mat img = imread(filename, IMREAD_GRAYSCALE);
    ASSERT_FALSE(img.empty()) << "Unable to load source image " << filename;

    // Smooth first to reduce spurious circle candidates.
    GaussianBlur(img, img, Size(9, 9), 2, 2);

    vector<Vec4f> circles;  // (x, y, radius, votes)
    declare.in(img);

    TEST_CYCLE()
    {
        HoughCircles(img, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
    }

    SANITY_CHECK_NOTHING();
}
} // namespace

@ -69,4 +69,47 @@ PERF_TEST_P(Image_RhoStep_ThetaStep_Threshold, HoughLines,
SANITY_CHECK_NOTHING();
}
// Variant of the HoughLines perf test that requests Vec3f output, so
// HoughLines also reports the accumulator votes as the 3rd component.
PERF_TEST_P(Image_RhoStep_ThetaStep_Threshold, HoughLines3f,
            testing::Combine(
                testing::Values( "cv/shared/pic5.png", "stitching/a1.png" ),
                testing::Values( 1, 10 ),
                testing::Values( 0.01, 0.1 ),
                testing::Values( 0.5, 1.1 )
                )
            )
{
    string filename = getDataPath(get<0>(GetParam()));
    double rhoStep = get<1>(GetParam());
    double thetaStep = get<2>(GetParam());
    double threshold_ratio = get<3>(GetParam());

    Mat image = imread(filename, IMREAD_GRAYSCALE);
    if (image.empty())
        FAIL() << "Unable to load source image" << filename;

    Canny(image, image, 32, 128);

    // add some synthetic lines:
    line(image, Point(0, 0), Point(image.cols, image.rows), Scalar::all(255), 3);
    line(image, Point(image.cols, 0), Point(image.cols/2, image.rows), Scalar::all(255), 3);

    vector<Vec3f> lines;  // (rho, theta, votes)
    declare.time(60);

    // Threshold scales with image size; ratios < 1 must still detect the two
    // synthetic lines drawn above.
    int hough_threshold = (int)(std::min(image.cols, image.rows) * threshold_ratio);
    TEST_CYCLE() HoughLines(image, lines, rhoStep, thetaStep, hough_threshold);

    printf("%dx%d: %d lines\n", image.cols, image.rows, (int)lines.size());

    if (threshold_ratio < 1.0)
    {
        EXPECT_GE(lines.size(), 2u);
    }

    EXPECT_LT(lines.size(), 3000u);

    SANITY_CHECK_NOTHING();
}
} // namespace

@ -285,7 +285,8 @@ struct CvtHelper
template< typename VScn, typename VDcn, typename VDepth, SizePolicy sizePolicy = NONE >
struct OclHelper
{
OclHelper( InputArray _src, OutputArray _dst, int dcn)
OclHelper( InputArray _src, OutputArray _dst, int dcn) :
nArgs(0)
{
src = _src.getUMat();
Size sz = src.size(), dstSz;

@ -4643,7 +4643,8 @@ static bool ippFilter2D(int stype, int dtype, int kernel_type,
static bool dftFilter2D(int stype, int dtype, int kernel_type,
uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int full_width, int full_height,
int offset_x, int offset_y,
uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height,
int anchor_x, int anchor_y,
@ -4666,8 +4667,8 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
Point anchor = Point(anchor_x, anchor_y);
Mat kernel = Mat(Size(kernel_width, kernel_height), kernel_type, kernel_data, kernel_step);
Mat src(Size(width, height), stype, src_data, src_step);
Mat dst(Size(width, height), dtype, dst_data, dst_step);
Mat src(Size(full_width-offset_x, full_height-offset_y), stype, src_data, src_step);
Mat dst(Size(full_width, full_height), dtype, dst_data, dst_step);
Mat temp;
int src_channels = CV_MAT_CN(stype);
int dst_channels = CV_MAT_CN(dtype);
@ -4680,10 +4681,10 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
// we just use that.
int corrDepth = ddepth;
if ((ddepth == CV_32F || ddepth == CV_64F) && src_data != dst_data) {
temp = Mat(Size(width, height), dtype, dst_data, dst_step);
temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
} else {
corrDepth = ddepth == CV_64F ? CV_64F : CV_32F;
temp.create(Size(width, height), CV_MAKETYPE(corrDepth, dst_channels));
temp.create(Size(full_width, full_height), CV_MAKETYPE(corrDepth, dst_channels));
}
crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(corrDepth, src_channels),
@ -4694,9 +4695,9 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
}
} else {
if (src_data != dst_data)
temp = Mat(Size(width, height), dtype, dst_data, dst_step);
temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
else
temp.create(Size(width, height), dtype);
temp.create(Size(full_width, full_height), dtype);
crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(ddepth, src_channels),
anchor, delta, borderType);
@ -4830,7 +4831,8 @@ void filter2D(int stype, int dtype, int kernel_type,
res = dftFilter2D(stype, dtype, kernel_type,
src_data, src_step,
dst_data, dst_step,
width, height,
full_width, full_height,
offset_x, offset_y,
kernel_data, kernel_step,
kernel_width, kernel_height,
anchor_x, anchor_y,

@ -105,48 +105,56 @@ array of (rho, theta) pairs. linesMax is the buffer size (number of pairs).
Functions return the actual number of found lines.
*/
static void
HoughLinesStandard( const Mat& img, float rho, float theta,
int threshold, std::vector<Vec2f>& lines, int linesMax,
HoughLinesStandard( InputArray src, OutputArray lines, int type,
float rho, float theta,
int threshold, int linesMax,
double min_theta, double max_theta )
{
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Internal error");
Mat img = src.getMat();
int i, j;
float irho = 1 / rho;
CV_Assert( img.type() == CV_8UC1 );
CV_Assert( linesMax > 0 );
const uchar* image = img.ptr();
int step = (int)img.step;
int width = img.cols;
int height = img.rows;
if (max_theta < min_theta ) {
CV_Error( CV_StsBadArg, "max_theta must be greater than min_theta" );
}
int max_rho = width + height;
int min_rho = -max_rho;
CV_CheckGE(max_theta, min_theta, "max_theta must be greater than min_theta");
int numangle = cvRound((max_theta - min_theta) / theta);
int numrho = cvRound(((width + height) * 2 + 1) / rho);
int numrho = cvRound(((max_rho - min_rho) + 1) / rho);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
CV_IPP_CHECK()
if (type == CV_32FC2 && CV_IPP_CHECK_COND)
{
IppiSize srcSize = { width, height };
IppPointPolar delta = { rho, theta };
IppPointPolar dstRoi[2] = {{(Ipp32f) -(width + height), (Ipp32f) min_theta},{(Ipp32f) (width + height), (Ipp32f) max_theta}};
IppPointPolar dstRoi[2] = {{(Ipp32f) min_rho, (Ipp32f) min_theta},{(Ipp32f) max_rho, (Ipp32f) max_theta}};
int bufferSize;
int nz = countNonZero(img);
int ipp_linesMax = std::min(linesMax, nz*numangle/threshold);
int linesCount = 0;
lines.resize(ipp_linesMax);
std::vector<Vec2f> _lines(ipp_linesMax);
IppStatus ok = ippiHoughLineGetSize_8u_C1R(srcSize, delta, ipp_linesMax, &bufferSize);
Ipp8u* buffer = ippsMalloc_8u_L(bufferSize);
if (ok >= 0) {ok = CV_INSTRUMENT_FUN_IPP(ippiHoughLine_Region_8u32f_C1R, image, step, srcSize, (IppPointPolar*) &lines[0], dstRoi, ipp_linesMax, &linesCount, delta, threshold, buffer);};
if (ok >= 0) {ok = CV_INSTRUMENT_FUN_IPP(ippiHoughLine_Region_8u32f_C1R, image, step, srcSize, (IppPointPolar*) &_lines[0], dstRoi, ipp_linesMax, &linesCount, delta, threshold, buffer);};
ippsFree(buffer);
if (ok >= 0)
{
lines.resize(linesCount);
lines.create(linesCount, 1, CV_32FC2);
Mat(linesCount, 1, CV_32FC2, &_lines[0]).copyTo(lines);
CV_IMPL_ADD(CV_IMPL_IPP);
return;
}
lines.clear();
setIppErrorStatus();
}
#endif
@ -185,6 +193,9 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
// stage 4. store the first min(total,linesMax) lines to the output buffer
linesMax = std::min(linesMax, (int)_sort_buf.size());
double scale = 1./(numrho+2);
lines.create(linesMax, 1, type);
Mat _lines = lines.getMat();
for( i = 0; i < linesMax; i++ )
{
LinePolar line;
@ -193,7 +204,15 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = static_cast<float>(min_theta) + n * theta;
lines.push_back(Vec2f(line.rho, line.angle));
if (type == CV_32FC2)
{
_lines.at<Vec2f>(i) = Vec2f(line.rho, line.angle);
}
else
{
CV_DbgAssert(type == CV_32FC3);
_lines.at<Vec3f>(i) = Vec3f(line.rho, line.angle, (float)accum[idx]);
}
}
}
@ -212,15 +231,17 @@ struct hough_index
static void
HoughLinesSDiv( const Mat& img,
HoughLinesSDiv( InputArray image, OutputArray lines, int type,
float rho, float theta, int threshold,
int srn, int stn,
std::vector<Vec2f>& lines, int linesMax,
int srn, int stn, int linesMax,
double min_theta, double max_theta )
{
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Internal error");
#define _POINT(row, column)\
(image_src[(row)*step+(column)])
Mat img = image.getMat();
int index, i;
int ri, ti, ti1, ti0;
int row, col;
@ -343,7 +364,7 @@ HoughLinesSDiv( const Mat& img,
if( count * 100 > rn * tn )
{
HoughLinesStandard( img, rho, theta, threshold, lines, linesMax, min_theta, max_theta );
HoughLinesStandard( image, lines, type, rho, theta, threshold, linesMax, min_theta, max_theta );
return;
}
@ -415,11 +436,21 @@ HoughLinesSDiv( const Mat& img,
}
}
lines.create((int)lst.size(), 1, type);
Mat _lines = lines.getMat();
for( size_t idx = 0; idx < lst.size(); idx++ )
{
if( lst[idx].rho < 0 )
continue;
lines.push_back(Vec2f(lst[idx].rho, lst[idx].theta));
if (type == CV_32FC2)
{
_lines.at<Vec2f>((int)idx) = Vec2f(lst[idx].rho, lst[idx].theta);
}
else
{
CV_DbgAssert(type == CV_32FC3);
_lines.at<Vec3f>((int)idx) = Vec3f(lst[idx].rho, lst[idx].theta, (float)lst[idx].value);
}
}
}
@ -861,24 +892,26 @@ static bool ocl_HoughLinesP(InputArray _src, OutputArray _lines, double rho, dou
#endif /* HAVE_OPENCL */
void HoughLines( InputArray _image, OutputArray _lines,
void HoughLines( InputArray _image, OutputArray lines,
double rho, double theta, int threshold,
double srn, double stn, double min_theta, double max_theta )
{
CV_INSTRUMENT_REGION()
CV_OCL_RUN(srn == 0 && stn == 0 && _image.isUMat() && _lines.isUMat(),
ocl_HoughLines(_image, _lines, rho, theta, threshold, min_theta, max_theta));
int type = CV_32FC2;
if (lines.fixedType())
{
type = lines.type();
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Wrong type of output lines");
}
Mat image = _image.getMat();
std::vector<Vec2f> lines;
CV_OCL_RUN(srn == 0 && stn == 0 && _image.isUMat() && lines.isUMat() && type == CV_32FC2,
ocl_HoughLines(_image, lines, rho, theta, threshold, min_theta, max_theta));
if( srn == 0 && stn == 0 )
HoughLinesStandard(image, (float)rho, (float)theta, threshold, lines, INT_MAX, min_theta, max_theta );
HoughLinesStandard(_image, lines, type, (float)rho, (float)theta, threshold, INT_MAX, min_theta, max_theta );
else
HoughLinesSDiv(image, (float)rho, (float)theta, threshold, cvRound(srn), cvRound(stn), lines, INT_MAX, min_theta, max_theta);
Mat(lines).copyTo(_lines);
HoughLinesSDiv(_image, lines, type, (float)rho, (float)theta, threshold, cvRound(srn), cvRound(stn), INT_MAX, min_theta, max_theta);
}
@ -1007,11 +1040,16 @@ static bool cmpAccum(const EstimatedCircle& left, const EstimatedCircle& right)
return false;
}
inline Vec3f GetCircle(const EstimatedCircle& est)
// Extracts the (x, y, radius) triple of an estimated circle, dropping the
// accumulator value.
static inline Vec3f GetCircle(const EstimatedCircle& est)
{
    return est.c;
}
// Converts an estimated circle to (x, y, radius, votes), where votes is the
// Hough accumulator value cast to float.
static inline Vec4f GetCircle4f(const EstimatedCircle& est)
{
    const Vec3f& c = est.c;
    return Vec4f(c[0], c[1], c[2], static_cast<float>(est.accum));
}
class NZPointList : public std::vector<Point>
{
private:
@ -1264,12 +1302,13 @@ private:
Mutex& _lock;
};
static bool CheckDistance(const std::vector<Vec3f> &circles, size_t endIdx, const Vec3f& circle, float minDist2)
template<typename T>
static bool CheckDistance(const std::vector<T> &circles, size_t endIdx, const T& circle, float minDist2)
{
bool goodPoint = true;
for (uint j = 0; j < endIdx; ++j)
{
Vec3f pt = circles[j];
T pt = circles[j];
float distX = circle[0] - pt[0], distY = circle[1] - pt[1];
if (distX * distX + distY * distY < minDist2)
{
@ -1297,13 +1336,31 @@ static void GetCircleCenters(const std::vector<int> &centers, std::vector<Vec3f>
}
}
static void RemoveOverlaps(std::vector<Vec3f>& circles, float minDist)
static void GetCircleCenters(const std::vector<int> &centers, std::vector<Vec4f> &circles, int acols, float minDist, float dr)
{
size_t centerCnt = centers.size();
float minDist2 = minDist * minDist;
for (size_t i = 0; i < centerCnt; ++i)
{
int center = centers[i];
int y = center / acols;
int x = center - y * acols;
Vec4f circle = Vec4f((x + 0.5f) * dr, (y + 0.5f) * dr, 0, (float)center);
bool goodPoint = CheckDistance(circles, circles.size(), circle, minDist2);
if (goodPoint)
circles.push_back(circle);
}
}
template<typename T>
static void RemoveOverlaps(std::vector<T>& circles, float minDist)
{
float minDist2 = minDist * minDist;
size_t endIdx = 1;
for (size_t i = 1; i < circles.size(); ++i)
{
Vec3f circle = circles[i];
T circle = circles[i];
if (CheckDistance(circles, endIdx, circle, minDist2))
{
circles[endIdx] = circle;
@ -1313,6 +1370,16 @@ static void RemoveOverlaps(std::vector<Vec3f>& circles, float minDist)
circles.resize(endIdx);
}
static void CreateCircles(const std::vector<EstimatedCircle>& circlesEst, std::vector<Vec3f>& circles)
{
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle);
}
static void CreateCircles(const std::vector<EstimatedCircle>& circlesEst, std::vector<Vec4f>& circles)
{
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle4f);
}
template<class NZPoints>
class HoughCircleEstimateRadiusInvoker : public ParallelLoopBody
{
@ -1556,11 +1623,14 @@ inline int HoughCircleEstimateRadiusInvoker<NZPointSet>::filterCircles(const Poi
return nzCount;
}
static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float dp, float minDist,
template <typename CircleType>
static void HoughCirclesGradient(InputArray _image, OutputArray _circles,
float dp, float minDist,
int minRadius, int maxRadius, int cannyThreshold,
int accThreshold, int maxCircles, int kernelSize, bool centersOnly)
{
CV_Assert(kernelSize == -1 || kernelSize == 3 || kernelSize == 5 || kernelSize == 7);
dp = max(dp, 1.f);
float idp = 1.f/dp;
@ -1602,7 +1672,7 @@ static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float
std::sort(centers.begin(), centers.end(), hough_cmp_gt(accum.ptr<int>()));
std::vector<Vec3f> circles;
std::vector<CircleType> circles;
circles.reserve(256);
if (centersOnly)
{
@ -1635,15 +1705,16 @@ static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float
// Sort by accumulator value
std::sort(circlesEst.begin(), circlesEst.end(), cmpAccum);
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle);
// Create Circles
CreateCircles(circlesEst, circles);
RemoveOverlaps(circles, minDist);
}
if (circles.size() > 0)
{
int numCircles = std::min(maxCircles, int(circles.size()));
_circles.create(1, numCircles, CV_32FC3);
Mat(1, numCircles, CV_32FC3, &circles[0]).copyTo(_circles.getMat());
Mat(1, numCircles, cv::traits::Type<CircleType>::value, &circles[0]).copyTo(_circles);
return;
}
}
@ -1656,6 +1727,13 @@ static void HoughCircles( InputArray _image, OutputArray _circles,
{
CV_INSTRUMENT_REGION()
int type = CV_32FC3;
if( _circles.fixedType() )
{
type = _circles.type();
CV_CheckType(type, type == CV_32FC3 || type == CV_32FC4, "Wrong type of output circles");
}
CV_Assert(!_image.empty() && _image.type() == CV_8UC1 && (_image.isMat() || _image.isUMat()));
CV_Assert(_circles.isMat() || _circles.isVector());
@ -1679,9 +1757,16 @@ static void HoughCircles( InputArray _image, OutputArray _circles,
switch( method )
{
case CV_HOUGH_GRADIENT:
HoughCirclesGradient(_image, _circles, (float)dp, (float)minDist,
if (type == CV_32FC3)
HoughCirclesGradient<Vec3f>(_image, _circles, (float)dp, (float)minDist,
minRadius, maxRadius, cannyThresh,
accThresh, maxCircles, kernelSize, centersOnly);
else if (type == CV_32FC4)
HoughCirclesGradient<Vec4f>(_image, _circles, (float)dp, (float)minDist,
minRadius, maxRadius, cannyThresh,
accThresh, maxCircles, kernelSize, centersOnly);
else
CV_Error(Error::StsError, "Internal error");
break;
default:
CV_Error( Error::StsBadArg, "Unrecognized method id. Actually only CV_HOUGH_GRADIENT is supported." );
@ -1764,12 +1849,12 @@ cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
switch( method )
{
case CV_HOUGH_STANDARD:
HoughLinesStandard( image, (float)rho,
(float)theta, threshold, l2, linesMax, min_theta, max_theta );
HoughLinesStandard( image, l2, CV_32FC2, (float)rho,
(float)theta, threshold, linesMax, min_theta, max_theta );
break;
case CV_HOUGH_MULTI_SCALE:
HoughLinesSDiv( image, (float)rho, (float)theta,
threshold, iparam1, iparam2, l2, linesMax, min_theta, max_theta );
HoughLinesSDiv( image, l2, CV_32FC2, (float)rho, (float)theta,
threshold, iparam1, iparam2, linesMax, min_theta, max_theta );
break;
case CV_HOUGH_PROBABILISTIC:
HoughLinesProbabilistic( image, (float)rho, (float)theta,

@ -2119,4 +2119,79 @@ TEST(Imgproc_MorphEx, hitmiss_zero_kernel)
ASSERT_DOUBLE_EQ(cvtest::norm(dst, src, NORM_INF), 0.);
}
// Regression test for https://github.com/opencv/opencv/issues/10683:
// filter2D on an ROI whose kernel is larger than the ROI must read pixels
// from the full parent image (correct offset handling in the DFT-based
// filtering path), not treat the ROI as an isolated image.
TEST(Imgproc_Filter2D, dftFilter2d_regression_10683)
{
    // 24x24 binary-ish source image (fixed fixture data).
    uchar src_[24*24] = {
        0, 40, 0, 0, 255, 0, 0, 78, 131, 0, 196, 0, 255, 0, 0, 0, 0, 255, 70, 0, 255, 0, 0, 0,
        0, 0, 255, 204, 0, 0, 255, 93, 255, 0, 0, 255, 12, 0, 0, 0, 255, 121, 0, 255, 0, 0, 0, 255,
        0, 178, 0, 25, 67, 0, 165, 0, 255, 0, 0, 181, 151, 175, 0, 0, 32, 0, 0, 255, 165, 93, 0, 255,
        255, 255, 0, 0, 255, 126, 0, 0, 0, 0, 133, 29, 9, 0, 220, 255, 0, 142, 255, 255, 255, 0, 255, 0,
        255, 32, 255, 0, 13, 237, 0, 0, 0, 0, 0, 19, 90, 0, 0, 85, 122, 62, 95, 29, 255, 20, 0, 0,
        0, 0, 166, 41, 0, 48, 70, 0, 68, 0, 255, 0, 139, 7, 63, 144, 0, 204, 0, 0, 0, 98, 114, 255,
        105, 0, 0, 0, 0, 255, 91, 0, 73, 0, 255, 0, 0, 0, 255, 198, 21, 0, 0, 0, 255, 43, 153, 128,
        0, 98, 26, 0, 101, 0, 0, 0, 255, 0, 0, 0, 255, 77, 56, 0, 241, 0, 169, 132, 0, 255, 186, 255,
        255, 87, 0, 1, 0, 0, 10, 39, 120, 0, 23, 69, 207, 0, 0, 0, 0, 84, 0, 0, 0, 0, 255, 0,
        255, 0, 0, 136, 255, 77, 247, 0, 67, 0, 15, 255, 0, 143, 0, 243, 255, 0, 0, 238, 255, 0, 255, 8,
        42, 0, 0, 255, 29, 0, 0, 0, 255, 255, 255, 75, 0, 0, 0, 255, 0, 0, 255, 38, 197, 0, 255, 87,
        0, 123, 17, 0, 234, 0, 0, 149, 0, 0, 255, 16, 0, 0, 0, 255, 0, 255, 0, 38, 0, 114, 255, 76,
        0, 0, 8, 0, 255, 0, 0, 0, 220, 0, 11, 255, 0, 0, 55, 98, 0, 0, 0, 255, 0, 175, 255, 110,
        235, 0, 175, 0, 255, 227, 38, 206, 0, 0, 255, 246, 0, 0, 123, 183, 255, 0, 0, 255, 0, 156, 0, 54,
        0, 255, 0, 202, 0, 0, 0, 0, 157, 0, 255, 63, 0, 0, 0, 0, 0, 255, 132, 0, 255, 0, 0, 0,
        0, 0, 0, 255, 0, 0, 128, 126, 0, 243, 46, 7, 0, 211, 108, 166, 0, 0, 162, 227, 0, 204, 0, 51,
        255, 216, 0, 0, 43, 0, 255, 40, 188, 188, 255, 0, 0, 255, 34, 0, 0, 168, 0, 0, 0, 35, 0, 0,
        0, 80, 131, 255, 0, 255, 10, 0, 0, 0, 180, 255, 209, 255, 173, 34, 0, 66, 0, 49, 0, 255, 83, 0,
        0, 204, 0, 91, 0, 0, 0, 205, 84, 0, 0, 0, 92, 255, 91, 0, 126, 0, 185, 145, 0, 0, 9, 0,
        255, 0, 0, 255, 255, 0, 0, 255, 0, 0, 216, 0, 187, 221, 0, 0, 141, 0, 0, 209, 0, 0, 255, 0,
        255, 0, 0, 154, 150, 0, 0, 0, 148, 0, 201, 255, 0, 255, 16, 0, 0, 160, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 255, 0, 255, 0, 255, 0, 255, 198, 255, 147, 131, 0, 255, 202, 0, 0, 0, 0, 255, 0,
        0, 0, 0, 164, 181, 0, 0, 0, 69, 255, 31, 0, 255, 195, 0, 0, 255, 164, 109, 0, 0, 202, 0, 206,
        0, 0, 61, 235, 33, 255, 77, 0, 0, 0, 0, 85, 0, 228, 0, 0, 0, 0, 255, 0, 0, 5, 255, 255
    };
    Mat_<uchar> src(24, 24, src_);
    Mat dst = Mat::zeros(src.size(), src.type());

    // 12x12 box filter — deliberately larger than the 3x3 ROIs below so the
    // DFT code path must fetch data outside each ROI.
    int sz = 12, size2 = sz * sz;
    Mat kernel = Mat::ones(sz, sz, CV_32F) / size2;

    // Expected output, precomputed from filtering the whole image.
    uchar expected_[24*24] = {
        83, 83, 77, 80, 76, 76, 76, 75, 71, 67, 72, 71, 73, 70, 80, 83, 86, 84, 89, 88, 88, 96, 99, 98,
        83, 83, 77, 80, 76, 76, 76, 75, 71, 67, 72, 71, 73, 70, 80, 83, 86, 84, 89, 88, 88, 96, 99, 98,
        82, 82, 77, 80, 77, 75, 74, 75, 70, 68, 71, 72, 72, 72, 82, 84, 88, 88, 93, 92, 93, 100, 105, 104,
        76, 76, 72, 77, 73, 74, 73, 74, 69, 68, 71, 71, 73, 72, 82, 81, 86, 87, 92, 91, 92, 98, 103, 102,
        75, 75, 72, 77, 73, 72, 75, 76, 74, 71, 73, 75, 76, 72, 81, 80, 85, 87, 90, 89, 90, 97, 102, 97,
        74, 74, 71, 77, 72, 74, 77, 76, 74, 72, 74, 76, 77, 76, 84, 83, 85, 87, 90, 92, 93, 100, 102, 99,
        72, 72, 69, 71, 68, 73, 73, 73, 70, 69, 74, 72, 75, 75, 81, 82, 85, 87, 90, 94, 96, 103, 102, 101,
        71, 71, 68, 70, 68, 71, 73, 71, 69, 68, 74, 72, 73, 73, 81, 80, 84, 89, 91, 99, 102, 107, 106, 105,
        74, 74, 70, 69, 67, 73, 76, 72, 69, 70, 79, 75, 74, 75, 82, 83, 88, 91, 92, 100, 104, 108, 106, 105,
        75, 75, 71, 70, 67, 75, 76, 71, 67, 68, 75, 72, 72, 75, 81, 83, 87, 89, 89, 97, 102, 107, 103, 103,
        69, 69, 67, 67, 65, 72, 74, 71, 70, 70, 75, 74, 74, 75, 80, 80, 84, 85, 85, 92, 96, 100, 97, 97,
        67, 67, 67, 68, 67, 77, 79, 75, 74, 76, 81, 78, 81, 80, 84, 81, 84, 83, 83, 91, 94, 95, 93, 93,
        73, 73, 71, 73, 70, 80, 82, 79, 80, 83, 85, 82, 82, 82, 87, 84, 88, 87, 84, 91, 93, 94, 93, 92,
        72, 72, 74, 75, 71, 80, 81, 79, 80, 82, 82, 80, 82, 84, 88, 83, 87, 87, 83, 88, 88, 89, 90, 90,
        78, 78, 81, 80, 74, 84, 86, 82, 85, 86, 85, 81, 83, 83, 86, 84, 85, 84, 78, 85, 82, 83, 85, 84,
        81, 81, 84, 81, 75, 86, 90, 85, 89, 91, 89, 84, 86, 87, 90, 87, 89, 85, 78, 84, 79, 80, 81, 81,
        76, 76, 80, 79, 73, 86, 90, 87, 92, 95, 92, 87, 91, 92, 93, 87, 89, 84, 77, 81, 76, 74, 76, 76,
        77, 77, 80, 77, 72, 83, 86, 86, 93, 95, 91, 87, 92, 92, 93, 87, 90, 84, 79, 79, 75, 72, 75, 72,
        80, 80, 81, 79, 72, 82, 86, 86, 95, 97, 89, 87, 89, 89, 91, 85, 88, 84, 79, 80, 73, 69, 74, 73,
        82, 82, 82, 80, 74, 83, 86, 87, 98, 100, 90, 90, 93, 94, 94, 89, 90, 84, 82, 79, 71, 68, 72, 69,
        76, 76, 77, 76, 70, 81, 83, 88, 99, 102, 92, 91, 97, 97, 97, 90, 90, 86, 83, 81, 70, 67, 70, 68,
        75, 75, 76, 74, 69, 79, 84, 88, 102, 106, 95, 94, 99, 98, 98, 90, 89, 86, 82, 79, 67, 62, 65, 62,
        80, 80, 82, 78, 71, 82, 87, 90, 105, 108, 96, 94, 99, 98, 97, 88, 88, 85, 81, 79, 65, 61, 65, 60,
        77, 77, 80, 75, 66, 76, 81, 87, 102, 105, 92, 91, 95, 97, 96, 88, 89, 88, 84, 81, 67, 63, 68, 63
    };
    Mat_<uchar> expected(24, 24, expected_);

    // Filter the image as a grid of 3x3 ROIs; each ROI must still see the
    // surrounding pixels of the parent image through border handling.
    for(int r = 0; r < src.rows / 3; ++r)
    {
        for(int c = 0; c < src.cols / 3; ++c)
        {
            cv::Rect region(c * 3, r * 3, 3, 3);
            Mat roi_i(src, region);
            Mat roi_o(dst, region);
            cv::filter2D(roi_i, roi_o, -1, kernel);
        }
    }

    // Allow a tolerance of 2 for rounding differences between code paths.
    EXPECT_LE(cvtest::norm(dst, expected, NORM_INF), 2);
}
}} // namespace

@ -49,6 +49,8 @@ namespace opencv_test { namespace {
#define DEBUG_IMAGES 0
#endif
//#define GENERATE_DATA // generate data in debug mode via CPU code path (without IPP / OpenCL and other accelerators)
using namespace cv;
using namespace std;
@ -109,7 +111,8 @@ public:
{
}
void run_test()
template <typename CircleType>
void run_test(const char* xml_name)
{
string test_case_name = getTestCaseName(picture_name, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
string filename = cvtest::TS::ptr()->get_data_path() + picture_name;
@ -118,7 +121,7 @@ public:
GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles;
vector<CircleType> circles;
const double dp = 1.0;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
@ -127,31 +130,37 @@ public:
highlightCircles(filename, circles, imgProc + test_case_name + ".png");
#endif
string xml = imgProc + "HoughCircles.xml";
string xml = imgProc + xml_name;
#ifdef GENERATE_DATA
{
FileStorage fs(xml, FileStorage::READ);
FileNode node = fs[test_case_name];
if (node.empty())
ASSERT_TRUE(!fs.isOpened() || fs[test_case_name].empty());
}
{
fs.release();
fs.open(xml, FileStorage::APPEND);
FileStorage fs(xml, FileStorage::APPEND);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
fs << test_case_name << circles;
fs.release();
fs.open(xml, FileStorage::READ);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
}
vector<Vec3f> exp_circles;
read(fs[test_case_name], exp_circles, vector<Vec3f>());
#else
FileStorage fs(xml, FileStorage::READ);
FileNode node = fs[test_case_name];
ASSERT_FALSE(node.empty()) << "Missing test data: " << test_case_name << std::endl << "XML: " << xml;
vector<CircleType> exp_circles;
read(fs[test_case_name], exp_circles, vector<CircleType>());
fs.release();
EXPECT_EQ(exp_circles.size(), circles.size());
#endif
}
};
TEST_P(HoughCirclesTestFixture, regression)
{
run_test();
run_test<Vec3f>("HoughCircles.xml");
}
TEST_P(HoughCirclesTestFixture, regression4f)
{
run_test<Vec4f>("HoughCircles4f.xml");
}
INSTANTIATE_TEST_CASE_P(ImgProc, HoughCirclesTestFixture, testing::Combine(
@ -186,7 +195,9 @@ TEST(HoughCirclesTest, DefaultMaxRadius)
GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -220,7 +231,9 @@ TEST(HoughCirclesTest, CentersOnly)
GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -231,6 +244,9 @@ TEST(HoughCirclesTest, CentersOnly)
for (size_t i = 0; i < circles.size(); ++i)
{
EXPECT_EQ(circles[i][2], 0.0f) << "Did not ask for radius";
EXPECT_EQ(circles[i][0], circles4f[i][0]);
EXPECT_EQ(circles[i][1], circles4f[i][1]);
EXPECT_EQ(circles[i][2], circles4f[i][2]);
}
}
@ -249,7 +265,9 @@ TEST(HoughCirclesTest, ManySmallCircles)
EXPECT_FALSE(src.empty()) << "Invalid test image: " << filename;
vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -258,6 +276,7 @@ TEST(HoughCirclesTest, ManySmallCircles)
#endif
EXPECT_GT(circles.size(), size_t(3000)) << "Should find a lot of circles";
EXPECT_EQ(circles.size(), circles4f.size());
}
}} // namespace

@ -43,6 +43,8 @@
#include "test_precomp.hpp"
//#define GENERATE_DATA // generate data in debug mode via CPU code path (without IPP / OpenCL and other accelerators)
namespace opencv_test { namespace {
template<typename T>
@ -52,30 +54,36 @@ struct SimilarWith
float theta_eps;
float rho_eps;
SimilarWith<T>(T val, float e, float r_e): value(val), theta_eps(e), rho_eps(r_e) { };
bool operator()(T other);
bool operator()(const T& other);
};
template<>
bool SimilarWith<Vec2f>::operator()(Vec2f other)
bool SimilarWith<Vec2f>::operator()(const Vec2f& other)
{
return std::abs(other[0] - value[0]) < rho_eps && std::abs(other[1] - value[1]) < theta_eps;
}
template<>
bool SimilarWith<Vec3f>::operator()(const Vec3f& other)
{
return std::abs(other[0] - value[0]) < rho_eps && std::abs(other[1] - value[1]) < theta_eps;
}
template<>
bool SimilarWith<Vec4i>::operator()(Vec4i other)
bool SimilarWith<Vec4i>::operator()(const Vec4i& other)
{
return cv::norm(value, other) < theta_eps;
}
template <typename T>
int countMatIntersection(Mat expect, Mat actual, float eps, float rho_eps)
int countMatIntersection(const Mat& expect, const Mat& actual, float eps, float rho_eps)
{
int count = 0;
if (!expect.empty() && !actual.empty())
{
for (MatIterator_<T> it=expect.begin<T>(); it!=expect.end<T>(); it++)
for (MatConstIterator_<T> it=expect.begin<T>(); it!=expect.end<T>(); it++)
{
MatIterator_<T> f = std::find_if(actual.begin<T>(), actual.end<T>(), SimilarWith<T>(*it, eps, rho_eps));
MatConstIterator_<T> f = std::find_if(actual.begin<T>(), actual.end<T>(), SimilarWith<T>(*it, eps, rho_eps));
if (f != actual.end<T>())
count++;
}
@ -99,7 +107,8 @@ class BaseHoughLineTest
public:
enum {STANDART = 0, PROBABILISTIC};
protected:
void run_test(int type);
template<typename LinesType, typename LineType>
void run_test(int type, const char* xml_name);
string picture_name;
double rhoStep;
@ -162,23 +171,20 @@ public:
}
};
void BaseHoughLineTest::run_test(int type)
template<typename LinesType, typename LineType>
void BaseHoughLineTest::run_test(int type, const char* xml_name)
{
string filename = cvtest::TS::ptr()->get_data_path() + picture_name;
Mat src = imread(filename, IMREAD_GRAYSCALE);
EXPECT_FALSE(src.empty()) << "Invalid test image: " << filename;
ASSERT_FALSE(src.empty()) << "Invalid test image: " << filename;
string xml;
if (type == STANDART)
xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/HoughLines.xml";
else if (type == PROBABILISTIC)
xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/HoughLinesP.xml";
string xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/" + xml_name;
Mat dst;
Canny(src, dst, 100, 150, 3);
EXPECT_FALSE(dst.empty()) << "Failed Canny edge detector";
ASSERT_FALSE(dst.empty()) << "Failed Canny edge detector";
Mat lines;
LinesType lines;
if (type == STANDART)
HoughLines(dst, lines, rhoStep, thetaStep, threshold, 0, 0);
else if (type == PROBABILISTIC)
@ -188,34 +194,40 @@ void BaseHoughLineTest::run_test(int type)
threshold, minLineLength, maxGap);
test_case_name = getTestCaseName(test_case_name);
#ifdef GENERATE_DATA
{
FileStorage fs(xml, FileStorage::READ);
FileNode node = fs[test_case_name];
if (node.empty())
ASSERT_TRUE(!fs.isOpened() || fs[test_case_name].empty());
}
{
fs.release();
fs.open(xml, FileStorage::APPEND);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
fs << test_case_name << lines;
fs.release();
fs.open(xml, FileStorage::READ);
FileStorage fs(xml, FileStorage::APPEND);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
fs << test_case_name << Mat(lines);
}
#else
FileStorage fs(xml, FileStorage::READ);
FileNode node = fs[test_case_name];
ASSERT_FALSE(node.empty()) << "Missing test data: " << test_case_name << std::endl << "XML: " << xml;
Mat exp_lines;
read( fs[test_case_name], exp_lines, Mat() );
Mat exp_lines_;
read(fs[test_case_name], exp_lines_, Mat());
fs.release();
LinesType exp_lines;
exp_lines_.copyTo(exp_lines);
int count = -1;
if (type == STANDART)
count = countMatIntersection<Vec2f>(exp_lines, lines, (float) thetaStep + FLT_EPSILON, (float) rhoStep + FLT_EPSILON);
count = countMatIntersection<LineType>(Mat(exp_lines), Mat(lines), (float) thetaStep + FLT_EPSILON, (float) rhoStep + FLT_EPSILON);
else if (type == PROBABILISTIC)
count = countMatIntersection<Vec4i>(exp_lines, lines, 1e-4f, 0.f);
count = countMatIntersection<LineType>(Mat(exp_lines), Mat(lines), 1e-4f, 0.f);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
EXPECT_GE( count, (int) (exp_lines.total() * 0.8) );
EXPECT_LE(std::abs((double)count - Mat(exp_lines).total()), Mat(exp_lines).total() * 0.25)
<< "count=" << count << " expected=" << Mat(exp_lines).total();
#else
EXPECT_EQ( count, (int) exp_lines.total());
EXPECT_EQ(count, (int)Mat(exp_lines).total());
#endif
#endif // GENERATE_DATA
}
void HoughLinesPointSetTest::run_test(void)
@ -264,12 +276,22 @@ void HoughLinesPointSetTest::run_test(void)
TEST_P(StandartHoughLinesTest, regression)
{
run_test(STANDART);
run_test<Mat, Vec2f>(STANDART, "HoughLines.xml");
}
TEST_P(ProbabilisticHoughLinesTest, regression)
{
run_test(PROBABILISTIC);
run_test<Mat, Vec4i>(PROBABILISTIC, "HoughLinesP.xml");
}
TEST_P(StandartHoughLinesTest, regression_Vec2f)
{
run_test<std::vector<Vec2f>, Vec2f>(STANDART, "HoughLines2f.xml");
}
TEST_P(StandartHoughLinesTest, regression_Vec3f)
{
run_test<std::vector<Vec3f>, Vec3f>(STANDART, "HoughLines3f.xml");
}
TEST_P(HoughLinesPointSetTest, regression)

@ -357,20 +357,22 @@ void CV_RotatedRectangleIntersectionTest::test13()
void CV_RotatedRectangleIntersectionTest::test14()
{
const int kNumTests = 100;
const int kWidth = 5;
const int kHeight = 5;
const float kWidth = 5;
const float kHeight = 5;
RotatedRect rects[2];
std::vector<Point2f> inter;
cv::RNG& rng = cv::theRNG();
for (int i = 0; i < kNumTests; ++i)
{
for (int j = 0; j < 2; ++j)
{
rects[j].center = Point2f((float)(rand() % kWidth), (float)(rand() % kHeight));
rects[j].size = Size2f(rand() % kWidth + 1.0f, rand() % kHeight + 1.0f);
rects[j].angle = (float)(rand() % 360);
rects[j].center = Point2f(rng.uniform(0.0f, kWidth), rng.uniform(0.0f, kHeight));
rects[j].size = Size2f(rng.uniform(1.0f, kWidth), rng.uniform(1.0f, kHeight));
rects[j].angle = rng.uniform(0.0f, 360.0f);
}
rotatedRectangleIntersection(rects[0], rects[1], inter);
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter));
int res = rotatedRectangleIntersection(rects[0], rects[1], inter);
EXPECT_TRUE(res == INTERSECT_NONE || res == INTERSECT_PARTIAL || res == INTERSECT_FULL) << res;
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter)) << inter;
}
}

@ -259,7 +259,7 @@ public:
res_pyr[lvl - 1] += up;
}
dst.create(size, CV_32FCC);
res_pyr[0].copyTo(dst.getMat());
res_pyr[0].copyTo(dst);
}
float getContrastWeight() const CV_OVERRIDE { return wcon; }

@ -189,11 +189,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
// bail out to let the user know that it is not available
if (pref) break;
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(capture, cvCreateCameraCapture_MSMF(index))
if (pref) break;
#endif
case CAP_VFW: // or CAP_V4L or CAP_V4L2
#ifdef HAVE_VFW
TRY_OPEN(capture, cvCreateCameraCapture_VFW(index))
@ -304,12 +299,6 @@ CV_IMPL CvCapture * cvCreateFileCaptureWithPreference (const char * filename, in
if (apiPreference) break;
#endif
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(result, cvCreateFileCapture_MSMF (filename))
if (apiPreference) break;
#endif
#ifdef HAVE_VFW
case CAP_VFW:
TRY_OPEN(result, cvCreateFileCapture_VFW (filename))
@ -378,11 +367,6 @@ static CvVideoWriter* cvCreateVideoWriterWithPreference(const char* filename, in
default:
//exit if the specified API is unavaliable
if (apiPreference != CAP_ANY) break;
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(result, cvCreateVideoWriter_MSMF(filename, fourcc, fps, frameSize, is_color))
if (apiPreference != CAP_ANY) break;
#endif
#ifdef HAVE_VFW
case CAP_VFW:
TRY_OPEN(result, cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color))
@ -441,6 +425,9 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
#ifdef HAVE_GSTREAMER
CAP_GSTREAMER,
#endif
#ifdef HAVE_MSMF
CAP_MSMF,
#endif
#ifdef HAVE_DSHOW
CAP_DSHOW,
#endif
@ -469,6 +456,7 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
for (int i = 0; domains[i] >= 0; i++)
{
#if defined(HAVE_GSTREAMER) || \
defined(HAVE_MSMF) || \
defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \
defined(HAVE_LIBREALSENSE) || \
@ -484,6 +472,11 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
capture = createGStreamerCapture(index);
break;
#endif
#ifdef HAVE_MSMF
case CAP_MSMF:
capture = cvCreateCapture_MSMF(index);
break; // CAP_MSMF
#endif
#ifdef HAVE_DSHOW
case CAP_DSHOW:
capture = makePtr<VideoCapture_DShow>(index);
@ -549,6 +542,14 @@ static Ptr<IVideoCapture> IVideoCapture_create(const String& filename, int apiPr
return capture;
}
#endif
#ifdef HAVE_MSMF
if (useAny || apiPreference == CAP_MSMF)
{
capture = cvCreateCapture_MSMF(filename);
if (capture && capture->isOpened())
return capture;
}
#endif
#ifdef HAVE_GPHOTO2
if (useAny || apiPreference == CAP_GPHOTO2)
{
@ -587,6 +588,14 @@ static Ptr<IVideoWriter> IVideoWriter_create(const String& filename, int apiPref
return iwriter;
}
#endif
#ifdef HAVE_MSMF
if (apiPreference == CAP_MSMF || apiPreference == CAP_ANY)
{
iwriter = cvCreateVideoWriter_MSMF(filename, _fourcc, fps, frameSize, isColor);
if (!iwriter.empty())
return iwriter;
}
#endif
#ifdef HAVE_MFX
if (apiPreference == CAP_INTEL_MFX || apiPreference == CAP_ANY)
{

@ -125,7 +125,7 @@ private:
gst_init(NULL, NULL);
guint major, minor, micro, nano;
gst_version(&major, &minor, &micro, &nano);
if (GST_VERSION_MAJOR == major)
if (GST_VERSION_MAJOR != major)
{
CV_WARN("incompatible gstreamer version");
}
@ -268,7 +268,6 @@ bool GStreamerCapture::grabFrame()
sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));
if(!sample)
return false;
gst_sample_ref(sample);
#endif
if (isPosFramesEmulated)

@ -681,7 +681,7 @@ void MediaType::Clear()
}
/******* Capturing video from camera or file via Microsoft Media Foundation **********/
class CvCapture_MSMF : public CvCapture
class CvCapture_MSMF : public cv::IVideoCapture
{
public:
typedef enum {
@ -689,14 +689,17 @@ public:
MODE_HW = 1
} MSMFCapture_Mode;
CvCapture_MSMF();
CvCapture_MSMF(int);
CvCapture_MSMF(const cv::String&);
virtual ~CvCapture_MSMF();
virtual bool open(int index);
virtual bool open(const char* filename);
virtual bool open(int);
virtual bool open(const cv::String&);
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE { return isOpen; }
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_MSMF; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
double getFramerate(MediaType MT) const;
@ -723,8 +726,7 @@ protected:
LONGLONG frameStep;
_ComPtr<IMFSample> videoSample;
LONGLONG sampleTime;
IplImage* frame;
bool isOpened;
bool isOpen;
};
CvCapture_MSMF::CvCapture_MSMF():
@ -743,10 +745,11 @@ CvCapture_MSMF::CvCapture_MSMF():
aspectN(1),
aspectD(1),
sampleTime(0),
frame(NULL),
isOpened(false)
isOpen(false)
{
}
CvCapture_MSMF::CvCapture_MSMF(int index) : CvCapture_MSMF() { open(index); }
CvCapture_MSMF::CvCapture_MSMF(const cv::String& _filename) : CvCapture_MSMF() { open(_filename); }
CvCapture_MSMF::~CvCapture_MSMF()
{
@ -755,15 +758,13 @@ CvCapture_MSMF::~CvCapture_MSMF()
void CvCapture_MSMF::close()
{
if (isOpened)
if (isOpen)
{
isOpened = false;
isOpen = false;
if (videoSample)
videoSample.Reset();
if (videoFileSource)
videoFileSource.Reset();
if (frame)
cvReleaseImage(&frame);
camid = -1;
filename = "";
}
@ -775,7 +776,7 @@ bool CvCapture_MSMF::configureHW(bool enable)
if ((enable && D3DMgr && D3DDev) || (!enable && !D3DMgr && !D3DDev))
return true;
bool reopen = isOpened;
bool reopen = isOpen;
int prevcam = camid;
cv::String prevfile = filename;
close();
@ -973,7 +974,7 @@ bool CvCapture_MSMF::open(int _index)
#endif
if (SUCCEEDED(MFCreateSourceReaderFromMediaSource(mSrc.Get(), srAttr.Get(), &videoFileSource)))
{
isOpened = true;
isOpen = true;
duration = 0;
if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat))
{
@ -992,13 +993,13 @@ bool CvCapture_MSMF::open(int _index)
CoTaskMemFree(ppDevices);
}
return isOpened;
return isOpen;
}
bool CvCapture_MSMF::open(const char* _filename)
bool CvCapture_MSMF::open(const cv::String& _filename)
{
close();
if (!_filename)
if (_filename.empty())
return false;
// Set source reader parameters
@ -1014,11 +1015,11 @@ bool CvCapture_MSMF::open(const char* _filename)
if(D3DMgr)
srAttr->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, D3DMgr.Get());
#endif
cv::AutoBuffer<wchar_t> unicodeFileName(strlen(_filename) + 1);
MultiByteToWideChar(CP_ACP, 0, _filename, -1, unicodeFileName, (int)strlen(_filename) + 1);
cv::AutoBuffer<wchar_t> unicodeFileName(_filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, _filename.c_str(), -1, unicodeFileName, (int)_filename.length() + 1);
if (SUCCEEDED(MFCreateSourceReaderFromURL(unicodeFileName, srAttr.Get(), &videoFileSource)))
{
isOpened = true;
isOpen = true;
sampleTime = 0;
if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat))
{
@ -1039,12 +1040,12 @@ bool CvCapture_MSMF::open(const char* _filename)
}
}
return isOpened;
return isOpen;
}
bool CvCapture_MSMF::grabFrame()
{
if (isOpened)
if (isOpen)
{
DWORD streamIndex, flags;
if (videoSample)
@ -1112,7 +1113,7 @@ bool CvCapture_MSMF::grabFrame()
return false;
}
IplImage* CvCapture_MSMF::retrieveFrame(int)
bool CvCapture_MSMF::retrieveFrame(int, cv::OutputArray frame)
{
DWORD bcnt;
if (videoSample && SUCCEEDED(videoSample->GetBufferCount(&bcnt)) && bcnt > 0)
@ -1128,56 +1129,46 @@ IplImage* CvCapture_MSMF::retrieveFrame(int)
{
if ((unsigned int)cursize == captureFormat.MF_MT_SAMPLE_SIZE)
{
if (!frame || (int)captureFormat.width != frame->width || (int)captureFormat.height != frame->height)
{
cvReleaseImage(&frame);
unsigned int bytes = outputFormat == CV_CAP_MODE_GRAY || !convertFormat ? 1 : outputFormat == CV_CAP_MODE_YUYV ? 2 : 3;
frame = cvCreateImage(cvSize(captureFormat.width, captureFormat.height), 8, bytes);
}
switch (outputFormat)
{
case CV_CAP_MODE_YUYV:
memcpy(frame->imageData, ptr, cursize);
cv::Mat(captureFormat.height, captureFormat.width, CV_8UC2, ptr).copyTo(frame);
break;
case CV_CAP_MODE_BGR:
if (captureMode == MODE_HW)
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), cv::cvarrToMat(frame), cv::COLOR_BGRA2BGR);
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), frame, cv::COLOR_BGRA2BGR);
else
memcpy(frame->imageData, ptr, cursize);
cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr).copyTo(frame);
break;
case CV_CAP_MODE_RGB:
if (captureMode == MODE_HW)
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), cv::cvarrToMat(frame), cv::COLOR_BGRA2BGR);
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), frame, cv::COLOR_BGRA2BGR);
else
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr), cv::cvarrToMat(frame), cv::COLOR_BGR2RGB);
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr), frame, cv::COLOR_BGR2RGB);
break;
case CV_CAP_MODE_GRAY:
memcpy(frame->imageData, ptr, captureFormat.height*captureFormat.width);
cv::Mat(captureFormat.height, captureFormat.width, CV_8UC1, ptr).copyTo(frame);
break;
default:
cvReleaseImage(&frame);
frame.release();
break;
}
}
else
cvReleaseImage(&frame);
frame.release();
}
else
{
if (!frame || frame->width != (int)cursize || frame->height != 1)
{
cvReleaseImage(&frame);
frame = cvCreateImage(cvSize(cursize, 1), 8, 1);
}
memcpy(frame->imageData, ptr, cursize);
cv::Mat(1, cursize, CV_8UC1, ptr).copyTo(frame);
}
buf->Unlock();
return frame;
return !frame.empty();
}
}
}
return NULL;
frame.release();
return false;
}
double CvCapture_MSMF::getFramerate(MediaType MT) const
@ -1227,7 +1218,7 @@ double CvCapture_MSMF::getProperty( int property_id ) const
return aspectN;
else if (property_id == CV_CAP_PROP_SAR_DEN)
return aspectD;
else if (isOpened)
else if (isOpen)
switch (property_id)
{
case CV_CAP_PROP_FRAME_WIDTH:
@ -1521,7 +1512,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
// image capture properties
if (property_id == CV_CAP_PROP_FORMAT)
{
if (isOpened)
if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, (int)cvRound(value), convertFormat);
else
outputFormat = (int)cvRound(value);
@ -1541,7 +1532,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
}
else if (property_id == CV_CAP_PROP_CONVERT_RGB)
{
if (isOpened)
if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, outputFormat, value != 0);
else
convertFormat = value != 0;
@ -1549,7 +1540,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
}
else if (property_id == CV_CAP_PROP_SAR_NUM && value > 0)
{
if (isOpened)
if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), (UINT32)cvRound(value), aspectD, outputFormat, convertFormat);
else
aspectN = (UINT32)cvRound(value);
@ -1557,13 +1548,13 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
}
else if (property_id == CV_CAP_PROP_SAR_DEN && value > 0)
{
if (isOpened)
if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, (UINT32)cvRound(value), outputFormat, convertFormat);
else
aspectD = (UINT32)cvRound(value);
return true;
}
else if (isOpened)
else if (isOpen)
switch (property_id)
{
case CV_CAP_PROP_FRAME_WIDTH:
@ -1778,41 +1769,20 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
return false;
}
CvCapture* cvCreateCameraCapture_MSMF( int index )
{
CvCapture_MSMF* capture = new CvCapture_MSMF;
try
cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF( int index )
{
if( capture->open( index ))
cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>(index);
if (capture && capture->isOpened())
return capture;
}
catch(...)
{
delete capture;
throw;
}
delete capture;
return 0;
return cv::Ptr<cv::IVideoCapture>();
}
CvCapture* cvCreateFileCapture_MSMF (const char* filename)
{
CvCapture_MSMF* capture = new CvCapture_MSMF;
try
cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF (const cv::String& filename)
{
if( capture->open(filename) )
cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>(filename);
if (capture && capture->isOpened())
return capture;
else
{
delete capture;
return NULL;
}
}
catch(...)
{
delete capture;
throw;
}
return cv::Ptr<cv::IVideoCapture>();
}
//
@ -1821,15 +1791,21 @@ CvCapture* cvCreateFileCapture_MSMF (const char* filename)
//
//
class CvVideoWriter_MSMF : public CvVideoWriter
class CvVideoWriter_MSMF : public cv::IVideoWriter
{
public:
CvVideoWriter_MSMF();
CvVideoWriter_MSMF(const cv::String& filename, int fourcc,
double fps, cv::Size frameSize, bool isColor);
virtual ~CvVideoWriter_MSMF();
virtual bool open(const char* filename, int fourcc,
double fps, CvSize frameSize, bool isColor);
virtual bool open(const cv::String& filename, int fourcc,
double fps, cv::Size frameSize, bool isColor);
virtual void close();
virtual bool writeFrame(const IplImage* img);
virtual void write(cv::InputArray);
virtual double getProperty(int) const { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool isOpened() const { return initiated; }
private:
Media_Foundation& MF;
@ -1857,6 +1833,7 @@ CvVideoWriter_MSMF::CvVideoWriter_MSMF():
initiated(false)
{
}
CvVideoWriter_MSMF::CvVideoWriter_MSMF(const cv::String& filename, int fourcc, double fps, cv::Size frameSize, bool isColor) : CvVideoWriter_MSMF() { open(filename, fourcc, fps, frameSize, isColor); }
CvVideoWriter_MSMF::~CvVideoWriter_MSMF()
{
@ -1916,8 +1893,8 @@ const GUID CvVideoWriter_MSMF::FourCC2GUID(int fourcc)
}
}
bool CvVideoWriter_MSMF::open( const char* filename, int fourcc,
double _fps, CvSize _frameSize, bool /*isColor*/ )
bool CvVideoWriter_MSMF::open( const cv::String& filename, int fourcc,
double _fps, cv::Size _frameSize, bool /*isColor*/ )
{
if (initiated)
close();
@ -1956,8 +1933,8 @@ bool CvVideoWriter_MSMF::open( const char* filename, int fourcc,
)
{
// Create the sink writer
cv::AutoBuffer<wchar_t> unicodeFileName(strlen(filename) + 1);
MultiByteToWideChar(CP_ACP, 0, filename, -1, unicodeFileName, (int)strlen(filename) + 1);
cv::AutoBuffer<wchar_t> unicodeFileName(filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, filename.c_str(), -1, unicodeFileName, (int)filename.length() + 1);
HRESULT hr = MFCreateSinkWriterFromURL(unicodeFileName, NULL, spAttr.Get(), &sinkWriter);
if (SUCCEEDED(hr))
{
@ -1987,12 +1964,12 @@ void CvVideoWriter_MSMF::close()
}
}
bool CvVideoWriter_MSMF::writeFrame(const IplImage* img)
void CvVideoWriter_MSMF::write(cv::InputArray img)
{
if (!img ||
(img->nChannels != 1 && img->nChannels != 3 && img->nChannels != 4) ||
(UINT32)img->width != videoWidth || (UINT32)img->height != videoHeight)
return false;
if (img.empty() ||
(img.channels() != 1 && img.channels() != 3 && img.channels() != 4) ||
(UINT32)img.cols() != videoWidth || (UINT32)img.rows() != videoHeight)
return;
const LONG cbWidth = 4 * videoWidth;
const DWORD cbBuffer = cbWidth * videoHeight;
@ -2014,27 +1991,23 @@ bool CvVideoWriter_MSMF::writeFrame(const IplImage* img)
SUCCEEDED(buffer->Lock(&pData, NULL, NULL)))
{
// Copy the video frame to the buffer.
cv::cvtColor(cv::cvarrToMat(img), cv::Mat(videoHeight, videoWidth, CV_8UC4, pData, cbWidth), img->nChannels > 1 ? cv::COLOR_BGR2BGRA : cv::COLOR_GRAY2BGRA);
cv::cvtColor(img.getMat(), cv::Mat(videoHeight, videoWidth, CV_8UC4, pData, cbWidth), img.channels() > 1 ? cv::COLOR_BGR2BGRA : cv::COLOR_GRAY2BGRA);
buffer->Unlock();
// Send media sample to the Sink Writer.
if (SUCCEEDED(sinkWriter->WriteSample(streamIndex, sample.Get())))
{
rtStart += rtDuration;
return true;
}
}
return false;
}
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
double fps, CvSize frameSize, int isColor )
cv::Ptr<cv::IVideoWriter> cv::cvCreateVideoWriter_MSMF( const cv::String& filename, int fourcc,
double fps, cv::Size frameSize, int isColor )
{
CvVideoWriter_MSMF* writer = new CvVideoWriter_MSMF;
if( writer->open( filename, fourcc, fps, frameSize, isColor != 0 ))
cv::Ptr<CvVideoWriter_MSMF> writer = cv::makePtr<CvVideoWriter_MSMF>(filename, fourcc, fps, frameSize, isColor != 0);
if (writer && writer->isOpened())
return writer;
delete writer;
return NULL;
return cv::Ptr<cv::IVideoWriter>();
}
#endif

@ -244,26 +244,26 @@ private:
ApproximateSynchronizerBase(_approxSyncGrabber)
{}
virtual bool isSpinContinue() const
virtual bool isSpinContinue() const CV_OVERRIDE
{
int maxBufferSize = approxSyncGrabber.getMaxBufferSize();
return (maxBufferSize <= 0) || (static_cast<int>(depthQueue.size()) < maxBufferSize &&
static_cast<int>(imageQueue.size()) < maxBufferSize); // "<" to may push
}
virtual inline void pushDepthMetaData( xn::DepthMetaData& depthMetaData )
virtual inline void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) CV_OVERRIDE
{
cv::Ptr<xn::DepthMetaData> depthPtr = cv::makePtr<xn::DepthMetaData>();
depthPtr->CopyFrom(depthMetaData);
depthQueue.push(depthPtr);
}
virtual inline void pushImageMetaData( xn::ImageMetaData& imageMetaData )
virtual inline void pushImageMetaData( xn::ImageMetaData& imageMetaData ) CV_OVERRIDE
{
cv::Ptr<xn::ImageMetaData> imagePtr = cv::makePtr<xn::ImageMetaData>();
imagePtr->CopyFrom(imageMetaData);
imageQueue.push(imagePtr);
}
virtual inline bool popDepthMetaData( xn::DepthMetaData& depthMetaData )
virtual inline bool popDepthMetaData( xn::DepthMetaData& depthMetaData ) CV_OVERRIDE
{
if( depthQueue.empty() )
return false;
@ -272,7 +272,7 @@ private:
depthQueue.pop();
return true;
}
virtual inline bool popImageMetaData( xn::ImageMetaData& imageMetaData )
virtual inline bool popImageMetaData( xn::ImageMetaData& imageMetaData ) CV_OVERRIDE
{
if( imageQueue.empty() )
return false;

@ -116,10 +116,6 @@ CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_MSMF( int index );
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateCameraCapture_OpenNI2( int index );
CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
@ -195,6 +191,10 @@ namespace cv
Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const String& filename);
Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const String& filename, int fourcc, double fps, Size frameSize, int isColor);
Ptr<IVideoCapture> cvCreateCapture_MSMF(int index);
Ptr<IVideoCapture> cvCreateCapture_MSMF(const String& filename);
Ptr<IVideoWriter> cvCreateVideoWriter_MSMF(const String& filename, int fourcc, double fps, Size frameSize, int is_color);
}
#endif /* __VIDEOIO_H_ */

@ -17,36 +17,34 @@ using namespace std;
*/
int main( int argc, char** argv )
{
Mat src, dst;
const char* source_window = "Source image";
const char* equalized_window = "Equalized Image";
/// Load image
//! [Load image]
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
Mat src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
//! [Load image]
/// Convert to grayscale
//! [Convert to grayscale]
cvtColor( src, src, COLOR_BGR2GRAY );
//! [Convert to grayscale]
/// Apply Histogram Equalization
//! [Apply Histogram Equalization]
Mat dst;
equalizeHist( src, dst );
//! [Apply Histogram Equalization]
/// Display results
namedWindow( source_window, WINDOW_AUTOSIZE );
namedWindow( equalized_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );
//! [Display results]
imshow( "Source image", src );
imshow( "Equalized Image", dst );
//! [Display results]
/// Wait until user exits the program
waitKey(0);
//! [Wait until user exits the program]
waitKey();
//! [Wait until user exits the program]
return 0;

@ -14,79 +14,93 @@ using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv; Mat hue;
Mat hue;
int bins = 25;
/// Function Headers
void Hist_and_Backproj(int, void* );
/**
* @function main
*/
int main( int, char** argv )
int main( int argc, char* argv[] )
{
/// Read the image
src = imread( argv[1], IMREAD_COLOR );
//! [Read the image]
CommandLineParser parser( argc, argv, "{@input | | input image}" );
Mat src = imread( parser.get<String>( "@input" ) );
if( src.empty() )
{ cout<<"Usage: ./calcBackProject_Demo1 <path_to_image>"<<endl;
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
//! [Read the image]
/// Transform it to HSV
//! [Transform it to HSV]
Mat hsv;
cvtColor( src, hsv, COLOR_BGR2HSV );
//! [Transform it to HSV]
/// Use only the Hue value
//! [Use only the Hue value]
hue.create(hsv.size(), hsv.depth());
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
//! [Use only the Hue value]
/// Create Trackbar to enter the number of bins
//! [Create Trackbar to enter the number of bins]
const char* window_image = "Source image";
namedWindow( window_image, WINDOW_AUTOSIZE );
namedWindow( window_image );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0);
//! [Create Trackbar to enter the number of bins]
/// Show the image
//! [Show the image]
imshow( window_image, src );
// Wait until user exits the program
waitKey();
//! [Show the image]
/// Wait until user exits the program
waitKey(0);
return 0;
}
/**
* @function Hist_and_Backproj
* @brief Callback to Trackbar
*/
void Hist_and_Backproj(int, void* )
{
MatND hist;
//! [initialize]
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
//! [initialize]
/// Get the Histogram and normalize it
//! [Get the Histogram and normalize it]
Mat hist;
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false );
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
//! [Get the Histogram and normalize it]
/// Get Backprojection
MatND backproj;
//! [Get Backprojection]
Mat backproj;
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
//! [Get Backprojection]
/// Draw the backproj
//! [Draw the backproj]
imshow( "BackProj", backproj );
//! [Draw the backproj]
/// Draw the histogram
int w = 400; int h = 400;
//! [Draw the histogram]
int w = 400, h = 400;
int bin_w = cvRound( (double) w / histSize );
Mat histImg = Mat::zeros( w, h, CV_8UC3 );
Mat histImg = Mat::zeros( h, w, CV_8UC3 );
for (int i = 0; i < bins; i++)
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); }
{
rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ),
Scalar( 0, 0, 255 ), FILLED );
}
imshow( "Histogram", histImg );
//! [Draw the histogram]
}

@ -14,10 +14,9 @@ using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv;
Mat mask;
Mat src, hsv, mask;
int lo = 20; int up = 20;
int low = 20, up = 20;
const char* window_image = "Source image";
/// Function Headers
@ -30,21 +29,22 @@ void pickPoint (int event, int x, int y, int, void* );
int main( int, char** argv )
{
/// Read the image
src = imread( argv[1], IMREAD_COLOR );
src = imread( argv[1] );
/// Transform it to HSV
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image
namedWindow( window_image, WINDOW_AUTOSIZE );
namedWindow( window_image );
imshow( window_image, src );
/// Set Trackbars for floodfill thresholds
createTrackbar( "Low thresh", window_image, &lo, 255, 0 );
createTrackbar( "Low thresh", window_image, &low, 255, 0 );
createTrackbar( "High thresh", window_image, &up, 255, 0 );
/// Set a Mouse Callback
setMouseCallback( window_image, pickPoint, 0 );
waitKey(0);
waitKey();
return 0;
}
@ -54,7 +54,9 @@ int main( int, char** argv )
void pickPoint (int event, int x, int y, int, void* )
{
if( event != EVENT_LBUTTONDOWN )
{ return; }
{
return;
}
// Fill and get the mask
Point seed = Point( x, y );
@ -65,8 +67,8 @@ void pickPoint (int event, int x, int y, int, void* )
int connectivity = 8;
int flags = connectivity + (newMaskVal << 8 ) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8UC1 );
floodFill( src, mask2, seed, newVal, 0, Scalar( lo, lo, lo ), Scalar( up, up, up), flags );
Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8U );
floodFill( src, mask2, seed, newVal, 0, Scalar( low, low, low ), Scalar( up, up, up), flags );
mask = mask2( Range( 1, mask2.rows - 1 ), Range( 1, mask2.cols - 1 ) );
imshow( "Mask", mask );
@ -79,12 +81,12 @@ void pickPoint (int event, int x, int y, int, void* )
*/
void Hist_and_Backproj( )
{
MatND hist;
Mat hist;
int h_bins = 30; int s_bins = 32;
int histSize[] = { h_bins, s_bins };
float h_range[] = { 0, 179 };
float s_range[] = { 0, 255 };
float h_range[] = { 0, 180 };
float s_range[] = { 0, 256 };
const float* ranges[] = { h_range, s_range };
int channels[] = { 0, 1 };
@ -95,10 +97,9 @@ void Hist_and_Backproj( )
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
/// Get Backprojection
MatND backproj;
Mat backproj;
calcBackProject( &hsv, 1, channels, hist, backproj, ranges, 1, true );
/// Draw the backproj
imshow( "BackProj", backproj );
}

@ -17,53 +17,54 @@ using namespace cv;
*/
int main(int argc, char** argv)
{
Mat src, dst;
/// Load image
String imageName( "../data/lena.jpg" ); // by default
if (argc > 1)
//! [Load image]
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
Mat src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
if( src.empty() )
{
imageName = argv[1];
return -1;
}
//! [Load image]
src = imread( imageName, IMREAD_COLOR );
if( src.empty() )
{ return -1; }
/// Separate the image in 3 places ( B, G and R )
//! [Separate the image in 3 places ( B, G and R )]
vector<Mat> bgr_planes;
split( src, bgr_planes );
//! [Separate the image in 3 places ( B, G and R )]
/// Establish the number of bins
//! [Establish the number of bins]
int histSize = 256;
//! [Establish the number of bins]
/// Set the ranges ( for B,G,R) )
float range[] = { 0, 256 } ;
//! [Set the ranges ( for B,G,R) )]
float range[] = { 0, 256 }; //the upper boundary is exclusive
const float* histRange = { range };
//! [Set the ranges ( for B,G,R) )]
bool uniform = true; bool accumulate = false;
//! [Set histogram param]
bool uniform = true, accumulate = false;
//! [Set histogram param]
//! [Compute the histograms]
Mat b_hist, g_hist, r_hist;
/// Compute the histograms:
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
//! [Compute the histograms]
// Draw the histograms for B, G and R
int hist_w = 512; int hist_h = 400;
//! [Draw the histograms for B, G and R]
int hist_w = 512, hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
//! [Draw the histograms for B, G and R]
/// Normalize the result to [ 0, histImage.rows ]
//! [Normalize the result to ( 0, histImage.rows )]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
//! [Normalize the result to ( 0, histImage.rows )]
/// Draw for each channel
//! [Draw for each channel]
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ),
@ -76,13 +77,13 @@ int main(int argc, char** argv)
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
Scalar( 0, 0, 255), 2, 8, 0 );
}
//! [Draw for each channel]
/// Display
namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
//! [Display]
imshow("Source image", src );
imshow("calcHist Demo", histImage );
waitKey(0);
waitKey();
//! [Display]
return 0;
}

@ -12,42 +12,43 @@
using namespace std;
using namespace cv;
const char* keys =
"{ help h| | Print help message. }"
"{ input1 | | Path to input image 1. }"
"{ input2 | | Path to input image 2. }"
"{ input3 | | Path to input image 3. }";
/**
* @function main
*/
int main( int argc, char** argv )
{
Mat src_base, hsv_base;
Mat src_test1, hsv_test1;
Mat src_test2, hsv_test2;
Mat hsv_half_down;
/// Load three images with different environment settings
if( argc < 4 )
{
printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_settings1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], IMREAD_COLOR );
src_test1 = imread( argv[2], IMREAD_COLOR );
src_test2 = imread( argv[3], IMREAD_COLOR );
//! [Load three images with different environment settings]
CommandLineParser parser( argc, argv, keys );
Mat src_base = imread( parser.get<String>("input1") );
Mat src_test1 = imread( parser.get<String>("input2") );
Mat src_test2 = imread( parser.get<String>("input3") );
if( src_base.empty() || src_test1.empty() || src_test2.empty() )
{
cout << "Can't read one of the images" << endl;
cout << "Could not open or find the images!\n" << endl;
parser.printMessage();
return -1;
}
//! [Load three images with different environment settings]
/// Convert to HSV
//! [Convert to HSV]
Mat hsv_base, hsv_test1, hsv_test2;
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
//! [Convert to HSV]
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
//! [Convert to HSV half]
Mat hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows ), Range( 0, hsv_base.cols ) );
//! [Convert to HSV half]
/// Using 50 bins for hue and 60 for saturation
int h_bins = 50; int s_bins = 60;
//! [Using 50 bins for hue and 60 for saturation]
int h_bins = 50, s_bins = 60;
int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255
@ -56,17 +57,13 @@ int main( int argc, char** argv )
const float* ranges[] = { h_ranges, s_ranges };
// Use the o-th and 1-st channels
// Use the 0-th and 1-st channels
int channels[] = { 0, 1 };
//! [Using 50 bins for hue and 60 for saturation]
//! [Calculate the histograms for the HSV images]
Mat hist_base, hist_half_down, hist_test1, hist_test2;
/// Histograms
MatND hist_base;
MatND hist_half_down;
MatND hist_test1;
MatND hist_test2;
/// Calculate the histograms for the HSV images
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
@ -78,20 +75,21 @@ int main( int argc, char** argv )
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
//! [Calculate the histograms for the HSV images]
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
//! [Apply the histogram comparison methods]
for( int compare_method = 0; compare_method < 4; compare_method++ )
{
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
cout << "Method " << compare_method << " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
<< base_base << " / " << base_half << " / " << base_test1 << " / " << base_test2 << endl;
}
//! [Apply the histogram comparison methods]
printf( "Done \n" );
cout << "Done \n";
return 0;
}

@ -12,77 +12,71 @@
using namespace cv;
using namespace std;
/// Global variables
const char* source_window = "Source image";
const char* warp_window = "Warp";
const char* warp_rotate_window = "Warp + Rotate";
/**
* @function main
*/
int main( int argc, char** argv )
{
Point2f srcTri[3];
Point2f dstTri[3];
Mat rot_mat( 2, 3, CV_32FC1 );
Mat warp_mat( 2, 3, CV_32FC1 );
Mat src, warp_dst, warp_rotate_dst;
/// Load the image
//! [Load the image]
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
Mat src = imread( parser.get<String>( "@input" ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
//! [Load the image]
/// Set the dst image the same type and size as src
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( src.cols - 1.f, 0 );
srcTri[2] = Point2f( 0, src.rows - 1.f );
//! [Set your 3 points to calculate the Affine Transform]
Point2f srcTri[3];
srcTri[0] = Point2f( 0.f, 0.f );
srcTri[1] = Point2f( src.cols - 1.f, 0.f );
srcTri[2] = Point2f( 0.f, src.rows - 1.f );
dstTri[0] = Point2f( src.cols*0.0f, src.rows*0.33f );
Point2f dstTri[3];
dstTri[0] = Point2f( 0.f, src.rows*0.33f );
dstTri[1] = Point2f( src.cols*0.85f, src.rows*0.25f );
dstTri[2] = Point2f( src.cols*0.15f, src.rows*0.7f );
//! [Set your 3 points to calculate the Affine Transform]
/// Get the Affine Transform
warp_mat = getAffineTransform( srcTri, dstTri );
//! [Get the Affine Transform]
Mat warp_mat = getAffineTransform( srcTri, dstTri );
//! [Get the Affine Transform]
//! [Apply the Affine Transform just found to the src image]
/// Set the dst image the same type and size as src
Mat warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Apply the Affine Transform just found to the src image
warpAffine( src, warp_dst, warp_mat, warp_dst.size() );
//! [Apply the Affine Transform just found to the src image]
/** Rotating the image after Warp */
/// Compute a rotation matrix with respect to the center of the image
//! [Compute a rotation matrix with respect to the center of the image]
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
double angle = -50.0;
double scale = 0.6;
//! [Compute a rotation matrix with respect to the center of the image]
/// Get the rotation matrix with the specifications above
rot_mat = getRotationMatrix2D( center, angle, scale );
//! [Get the rotation matrix with the specifications above]
Mat rot_mat = getRotationMatrix2D( center, angle, scale );
//! [Get the rotation matrix with the specifications above]
/// Rotate the warped image
//! [Rotate the warped image]
Mat warp_rotate_dst;
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
//! [Rotate the warped image]
//! [Show what you got]
imshow( "Source image", src );
imshow( "Warp", warp_dst );
imshow( "Warp + Rotate", warp_rotate_dst );
//! [Show what you got]
/// Show what you got
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
namedWindow( warp_window, WINDOW_AUTOSIZE );
imshow( warp_window, warp_dst );
namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
imshow( warp_rotate_window, warp_rotate_dst );
/// Wait until user exits the program
waitKey(0);
//! [Wait until user exits the program]
waitKey();
//! [Wait until user exits the program]
return 0;
}

@ -64,36 +64,51 @@ removedNodes = []
# Detect unfused batch normalization nodes and fuse them.
def fuse_batch_normalization():
pattern = ['Add', 'Rsqrt', 'Mul', 'Mul', 'Mul', 'Sub', 'Add']
candidates = []
for node in graph_def.node:
if node.op == pattern[len(candidates)]:
candidates.append(node)
# Add_0 <-- moving_variance, add_y
# Rsqrt <-- Add_0
# Mul_0 <-- Rsqrt, gamma
# Mul_1 <-- input, Mul_0
# Mul_2 <-- moving_mean, Mul_0
# Sub_0 <-- beta, Mul_2
# Add_1 <-- Mul_1, Sub_0
nodesMap = {node.name: node for node in graph_def.node}
subgraph = ['Add',
['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
def checkSubgraph(node, targetNode, inputs, fusedNodes):
op = targetNode[0]
if node.op == op and (len(node.input) >= len(targetNode) - 1):
fusedNodes.append(node)
for i, inpOp in enumerate(targetNode[1:]):
if isinstance(inpOp, list):
if not node.input[i] in nodesMap or \
not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes):
return False
else:
candidates = []
inputs[inpOp] = node.input[i]
if len(candidates) == len(pattern):
inp = candidates[3].input[0]
gamma = candidates[2].input[1]
beta = candidates[5].input[0]
moving_mean = candidates[4].input[0]
moving_variance = candidates[0].input[0]
return True
else:
return False
nodesToRemove = []
for node in graph_def.node:
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraph, inputs, fusedNodes):
name = node.name
node.Clear()
node.name = name
node.op = 'FusedBatchNorm'
node.input.append(inp)
node.input.append(gamma)
node.input.append(beta)
node.input.append(moving_mean)
node.input.append(moving_variance)
node.input.append(inputs['input'])
node.input.append(inputs['gamma'])
node.input.append(inputs['beta'])
node.input.append(inputs['moving_mean'])
node.input.append(inputs['moving_variance'])
text_format.Merge('f: 0.001', node.attr["epsilon"])
for candidate in candidates[:-1]:
graph_def.node.remove(candidate)
candidates = []
nodesToRemove += fusedNodes[1:]
for node in nodesToRemove:
graph_def.node.remove(node)
fuse_batch_normalization()

@ -0,0 +1,173 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.util.Arrays;
import java.util.List;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Java port of the OpenCV "Back Projection" tutorial (demo 1).
 * Extracts the Hue plane of an input image, lets the user pick the number of
 * histogram bins with a slider, and displays the source image, the hue back
 * projection, and the hue histogram side by side in a Swing window.
 * The "//! [...]" markers delimit snippets embedded in the tutorial docs.
 */
class CalcBackProject1 {
// Hue plane extracted from the HSV conversion of the input image.
private Mat hue;
// Rendered histogram image; rebuilt on every slider change in update().
private Mat histImg = new Mat();
private JFrame frame;
// Image holders: source image, back projection, histogram drawing.
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
// Slider maximum: hue range used below is [0, 180).
private static final int MAX_SLIDER = 180;
// Current number of histogram bins, driven by the slider.
private int bins = 25;
/**
 * Reads the image given as the single command-line argument, extracts its
 * hue plane, and builds the GUI. Exits the process on bad/missing input.
 */
public CalcBackProject1(String[] args) {
//! [Read the image]
if (args.length != 1) {
System.err.println("You must supply one argument that corresponds to the path to the image.");
System.exit(0);
}
Mat src = Imgcodecs.imread(args[0]);
if (src.empty()) {
System.err.println("Empty image: " + args[0]);
System.exit(0);
}
//! [Read the image]
//! [Transform it to HSV]
Mat hsv = new Mat();
Imgproc.cvtColor(src, hsv, Imgproc.COLOR_BGR2HSV);
//! [Transform it to HSV]
//! [Use only the Hue value]
hue = new Mat(hsv.size(), hsv.depth());
// Copy channel 0 of hsv (hue) into channel 0 of the single-channel 'hue' Mat.
Core.mixChannels(Arrays.asList(hsv), Arrays.asList(hue), new MatOfInt(0, 0));
//! [Use only the Hue value]
// Create and set up the window.
frame = new JFrame("Back Projection 1 demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
Image img = HighGui.toBufferedImage(src);
addComponentsToPane(frame.getContentPane(), img);
//! [Show the image]
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
// Display the window.
frame.pack();
frame.setVisible(true);
//! [Show the image]
}
/**
 * Lays out the bins slider (top) and the three image labels (center).
 * The slider plays the role of the C++ tutorial's createTrackbar call.
 */
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
}
//! [Create Trackbar to enter the number of bins]
JPanel sliderPanel = new JPanel();
sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
sliderPanel.add(new JLabel("* Hue bins: "));
JSlider slider = new JSlider(0, MAX_SLIDER, bins);
slider.setMajorTickSpacing(25);
slider.setMinorTickSpacing(5);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
// Recompute histogram and back projection whenever the bin count changes.
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
bins = source.getValue();
update();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
//! [Create Trackbar to enter the number of bins]
JPanel imgPanel = new JPanel();
imgLabel = new JLabel(new ImageIcon(img));
imgPanel.add(imgLabel);
backprojLabel = new JLabel();
imgPanel.add(backprojLabel);
histImgLabel = new JLabel();
imgPanel.add(histImgLabel);
pane.add(imgPanel, BorderLayout.CENTER);
}
/**
 * Recomputes the hue histogram and its back projection for the current bin
 * count, then refreshes the back-projection and histogram labels.
 */
private void update() {
//! [initialize]
// Use at least 2 bins even when the slider sits at 0 or 1.
int histSize = Math.max(bins, 2);
float[] hueRange = {0, 180};
//! [initialize]
//! [Get the Histogram and normalize it]
Mat hist = new Mat();
List<Mat> hueList = Arrays.asList(hue);
Imgproc.calcHist(hueList, new MatOfInt(0), new Mat(), hist, new MatOfInt(histSize), new MatOfFloat(hueRange), false);
// Scale bin counts to [0, 255] so they can be used directly for drawing.
Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);
//! [Get the Histogram and normalize it]
//! [Get Backprojection]
Mat backproj = new Mat();
Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, backproj, new MatOfFloat(hueRange), 1);
//! [Get Backprojection]
//! [Draw the backproj]
Image backprojImg = HighGui.toBufferedImage(backproj);
backprojLabel.setIcon(new ImageIcon(backprojImg));
//! [Draw the backproj]
//! [Draw the histogram]
int w = 400, h = 400;
int binW = (int) Math.round((double) w / histSize);
histImg = Mat.zeros(h, w, CvType.CV_8UC3);
// Copy the histogram values into a flat float array for drawing.
float[] histData = new float[(int) (hist.total() * hist.channels())];
hist.get(0, 0, histData);
// One filled red bar per bin; height scaled from the [0, 255] normalized counts.
for (int i = 0; i < bins; i++) {
Imgproc.rectangle(histImg, new Point(i * binW, h),
new Point((i + 1) * binW, h - Math.round(histData[i] * h / 255.0)), new Scalar(0, 0, 255), Core.FILLED);
}
Image histImage = HighGui.toBufferedImage(histImg);
histImgLabel.setIcon(new ImageIcon(histImage));
//! [Draw the histogram]
frame.repaint();
frame.pack();
}
}
/**
 * Entry point for the Back Projection 1 tutorial demo.
 * Loads the native OpenCV library, then builds the Swing GUI on the
 * AWT event dispatch thread.
 */
public class CalcBackProjectDemo1 {
    public static void main(String[] args) {
        // The native library must be loaded before any OpenCV call is made.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Swing components must be created and shown on the event dispatch thread.
        Runnable guiJob = new Runnable() {
            @Override
            public void run() {
                new CalcBackProject1(args);
            }
        };
        javax.swing.SwingUtilities.invokeLater(guiJob);
    }
}

@ -0,0 +1,189 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.util.Arrays;
import java.util.List;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Java port of the OpenCV "Back Projection" tutorial (demo 2).
 * The user clicks a pixel in the source image; a flood fill from that seed
 * (bounded by the two threshold sliders) produces a mask, from which a 2-D
 * hue/saturation histogram is computed and back-projected onto the image.
 */
class CalcBackProject2 {
// Input image as loaded from disk (BGR).
private Mat src;
// HSV conversion of src; used for histogram and back projection.
private Mat hsv = new Mat();
// Flood-fill mask trimmed to the source image size.
private Mat mask = new Mat();
private JFrame frame;
// Image holders: source, mask, back projection.
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel maskImgLabel;
// Both threshold sliders range over [0, 255].
private static final int MAX_SLIDER = 255;
// Lower and upper flood-fill brightness thresholds, driven by the sliders.
private int low = 20;
private int up = 20;
/**
 * Reads the image given as the single command-line argument, converts it
 * to HSV, and builds the GUI. Exits the process on bad/missing input.
 */
public CalcBackProject2(String[] args) {
/// Read the image
if (args.length != 1) {
System.err.println("You must supply one argument that corresponds to the path to the image.");
System.exit(0);
}
src = Imgcodecs.imread(args[0]);
if (src.empty()) {
System.err.println("Empty image: " + args[0]);
System.exit(0);
}
/// Transform it to HSV
Imgproc.cvtColor(src, hsv, Imgproc.COLOR_BGR2HSV);
// Create and set up the window.
frame = new JFrame("Back Projection 2 demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
Image img = HighGui.toBufferedImage(src);
addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
// Display the window.
frame.pack();
frame.setVisible(true);
}
/**
 * Lays out the two threshold sliders (top) and the three image labels
 * (center), and installs the mouse listener that triggers update().
 */
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
}
/// Set Trackbars for floodfill thresholds
JPanel sliderPanel = new JPanel();
sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
sliderPanel.add(new JLabel("Low thresh"));
JSlider slider = new JSlider(0, MAX_SLIDER, low);
slider.setMajorTickSpacing(20);
slider.setMinorTickSpacing(10);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
// Only records the new value; the back projection is recomputed on click.
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
low = source.getValue();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
sliderPanel.add(new JLabel("High thresh"));
slider = new JSlider(0, MAX_SLIDER, up);
slider.setMajorTickSpacing(20);
slider.setMinorTickSpacing(10);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
up = source.getValue();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
JPanel imgPanel = new JPanel();
imgLabel = new JLabel(new ImageIcon(img));
/// Set a Mouse Callback
// A press on the source image picks the flood-fill seed point.
imgLabel.addMouseListener(new MouseAdapter() {
@Override
public void mousePressed(MouseEvent e) {
update(e.getX(), e.getY());
}
});
imgPanel.add(imgLabel);
maskImgLabel = new JLabel();
imgPanel.add(maskImgLabel);
backprojLabel = new JLabel();
imgPanel.add(backprojLabel);
pane.add(imgPanel, BorderLayout.CENTER);
}
/**
 * Flood-fills from the clicked point (x, y) to build a mask, computes a
 * masked 2-D hue/saturation histogram, back-projects it onto the HSV
 * image, and refreshes the mask and back-projection labels.
 */
private void update(int x, int y) {
// Fill and get the mask
Point seed = new Point(x, y);
int newMaskVal = 255;
Scalar newVal = new Scalar(120, 120, 120);
int connectivity = 8;
// MASK_ONLY: write newMaskVal into mask2 instead of recoloring src;
// FIXED_RANGE: compare neighbors against the seed, not each other.
int flags = connectivity + (newMaskVal << 8) + Imgproc.FLOODFILL_FIXED_RANGE + Imgproc.FLOODFILL_MASK_ONLY;
// floodFill requires a mask 2 pixels larger than the image in each dimension.
Mat mask2 = Mat.zeros(src.rows() + 2, src.cols() + 2, CvType.CV_8U);
Imgproc.floodFill(src, mask2, seed, newVal, new Rect(), new Scalar(low, low, low), new Scalar(up, up, up), flags);
// Trim the 1-pixel border so the mask matches the source image size.
mask = mask2.submat(new Range(1, mask2.rows() - 1), new Range(1, mask2.cols() - 1));
Image maskImg = HighGui.toBufferedImage(mask);
maskImgLabel.setIcon(new ImageIcon(maskImg));
int hBins = 30, sBins = 32;
int[] histSize = { hBins, sBins };
// Hue range [0, 180), saturation range [0, 256) — upper bounds exclusive.
float[] ranges = { 0, 180, 0, 256 };
// Channel 0 = hue, channel 1 = saturation.
int[] channels = { 0, 1 };
/// Get the Histogram and normalize it
Mat hist = new Mat();
List<Mat> hsvList = Arrays.asList(hsv);
Imgproc.calcHist(hsvList, new MatOfInt(channels), mask, hist, new MatOfInt(histSize), new MatOfFloat(ranges), false );
Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);
/// Get Backprojection
Mat backproj = new Mat();
Imgproc.calcBackProject(hsvList, new MatOfInt(channels), hist, backproj, new MatOfFloat(ranges), 1);
Image backprojImg = HighGui.toBufferedImage(backproj);
backprojLabel.setIcon(new ImageIcon(backprojImg));
frame.repaint();
frame.pack();
}
}
/**
 * Entry point for the Back Projection 2 tutorial demo.
 * Loads the native OpenCV library, then builds the Swing GUI on the
 * AWT event dispatch thread.
 */
public class CalcBackProjectDemo2 {
    public static void main(String[] args) {
        // The native library must be loaded before any OpenCV call is made.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Swing components must be created and shown on the event dispatch thread.
        Runnable guiJob = new Runnable() {
            @Override
            public void run() {
                new CalcBackProject2(args);
            }
        };
        javax.swing.SwingUtilities.invokeLater(guiJob);
    }
}

@ -0,0 +1,99 @@
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Java port of the OpenCV "Histogram Calculation" tutorial.
 * Splits an image into its B, G and R planes, computes a 256-bin histogram
 * per plane, normalizes the counts to the drawing height, and renders the
 * three histograms as colored polylines in a single image.
 * The "//! [...]" markers delimit snippets embedded in the tutorial docs.
 */
class CalcHist {
/**
 * Runs the demo. Uses args[0] as the image path when given, otherwise
 * falls back to ../data/lena.jpg. Exits the process on unreadable input.
 */
public void run(String[] args) {
//! [Load image]
String filename = args.length > 0 ? args[0] : "../data/lena.jpg";
Mat src = Imgcodecs.imread(filename);
if (src.empty()) {
System.err.println("Cannot read image: " + filename);
System.exit(0);
}
//! [Load image]
//! [Separate the image in 3 places ( B, G and R )]
List<Mat> bgrPlanes = new ArrayList<>();
Core.split(src, bgrPlanes);
//! [Separate the image in 3 places ( B, G and R )]
//! [Establish the number of bins]
int histSize = 256;
//! [Establish the number of bins]
//! [Set the ranges ( for B,G,R) )]
float[] range = {0, 256}; //the upper boundary is exclusive
MatOfFloat histRange = new MatOfFloat(range);
//! [Set the ranges ( for B,G,R) )]
//! [Set histogram param]
boolean accumulate = false;
//! [Set histogram param]
//! [Compute the histograms]
// One single-channel histogram per plane: index 0 = B, 1 = G, 2 = R.
Mat bHist = new Mat(), gHist = new Mat(), rHist = new Mat();
Imgproc.calcHist(bgrPlanes, new MatOfInt(0), new Mat(), bHist, new MatOfInt(histSize), histRange, accumulate);
Imgproc.calcHist(bgrPlanes, new MatOfInt(1), new Mat(), gHist, new MatOfInt(histSize), histRange, accumulate);
Imgproc.calcHist(bgrPlanes, new MatOfInt(2), new Mat(), rHist, new MatOfInt(histSize), histRange, accumulate);
//! [Compute the histograms]
//! [Draw the histograms for B, G and R]
int histW = 512, histH = 400;
int binW = (int) Math.round((double) histW / histSize);
Mat histImage = new Mat( histH, histW, CvType.CV_8UC3, new Scalar( 0,0,0) );
//! [Draw the histograms for B, G and R]
//! [Normalize the result to ( 0, histImage.rows )]
// Scale bin counts to fit the drawing height.
Core.normalize(bHist, bHist, 0, histImage.rows(), Core.NORM_MINMAX);
Core.normalize(gHist, gHist, 0, histImage.rows(), Core.NORM_MINMAX);
Core.normalize(rHist, rHist, 0, histImage.rows(), Core.NORM_MINMAX);
//! [Normalize the result to ( 0, histImage.rows )]
//! [Draw for each channel]
// Copy each histogram into a flat float array for drawing.
float[] bHistData = new float[(int) (bHist.total() * bHist.channels())];
bHist.get(0, 0, bHistData);
float[] gHistData = new float[(int) (gHist.total() * gHist.channels())];
gHist.get(0, 0, gHistData);
float[] rHistData = new float[(int) (rHist.total() * rHist.channels())];
rHist.get(0, 0, rHistData);
// Connect consecutive bin heights with line segments, one color per plane.
// The y coordinate is subtracted from histH because image rows grow downward.
for( int i = 1; i < histSize; i++ ) {
Imgproc.line(histImage, new Point(binW * (i - 1), histH - Math.round(bHistData[i - 1])),
new Point(binW * (i), histH - Math.round(bHistData[i])), new Scalar(255, 0, 0), 2);
Imgproc.line(histImage, new Point(binW * (i - 1), histH - Math.round(gHistData[i - 1])),
new Point(binW * (i), histH - Math.round(gHistData[i])), new Scalar(0, 255, 0), 2);
Imgproc.line(histImage, new Point(binW * (i - 1), histH - Math.round(rHistData[i - 1])),
new Point(binW * (i), histH - Math.round(rHistData[i])), new Scalar(0, 0, 255), 2);
}
//! [Draw for each channel]
//! [Display]
HighGui.imshow( "Source image", src );
HighGui.imshow( "calcHist Demo", histImage );
HighGui.waitKey(0);
//! [Display]
System.exit(0);
}
}
/**
 * Entry point for the histogram calculation tutorial demo.
 */
public class CalcHistDemo {
    public static void main(String[] args) {
        // The native library must be loaded before any OpenCV call is made.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        CalcHist demo = new CalcHist();
        demo.run(args);
    }
}

@ -0,0 +1,91 @@
import java.util.Arrays;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Range;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Java port of the OpenCV "Histogram Comparison" tutorial.
 * Loads three images, computes 2-D hue/saturation histograms of their HSV
 * conversions (plus one of the lower half of the base image), and compares
 * the base histogram against each with the four compareHist methods
 * (0..3: Correlation, Chi-Square, Intersection, Bhattacharyya).
 * The "//! [...]" markers delimit snippets embedded in the tutorial docs.
 */
class CompareHist {
    /**
     * Runs the demo. Expects exactly three image paths in args; exits the
     * process when arguments are missing or any image cannot be read.
     */
    public void run(String[] args) {
        //! [Load three images with different environment settings]
        if (args.length != 3) {
            System.err.println("You must supply 3 arguments that correspond to the paths to 3 images.");
            System.exit(0);
        }
        Mat srcBase = Imgcodecs.imread(args[0]);
        Mat srcTest1 = Imgcodecs.imread(args[1]);
        Mat srcTest2 = Imgcodecs.imread(args[2]);
        if (srcBase.empty() || srcTest1.empty() || srcTest2.empty()) {
            System.err.println("Cannot read the images");
            System.exit(0);
        }
        //! [Load three images with different environment settings]
        //! [Convert to HSV]
        Mat hsvBase = new Mat(), hsvTest1 = new Mat(), hsvTest2 = new Mat();
        Imgproc.cvtColor( srcBase, hsvBase, Imgproc.COLOR_BGR2HSV );
        Imgproc.cvtColor( srcTest1, hsvTest1, Imgproc.COLOR_BGR2HSV );
        Imgproc.cvtColor( srcTest2, hsvTest2, Imgproc.COLOR_BGR2HSV );
        //! [Convert to HSV]
        //! [Convert to HSV half]
        // Lower half of the base image. Range end bounds are exclusive, so use
        // rows()/cols() — not rows()-1/cols()-1, which would drop the last row
        // and column — matching the C++ version of this tutorial.
        Mat hsvHalfDown = hsvBase.submat( new Range( hsvBase.rows()/2, hsvBase.rows() ), new Range( 0, hsvBase.cols() ) );
        //! [Convert to HSV half]
        //! [Using 50 bins for hue and 60 for saturation]
        int hBins = 50, sBins = 60;
        int[] histSize = { hBins, sBins };
        // hue varies from 0 to 179, saturation from 0 to 255
        float[] ranges = { 0, 180, 0, 256 };
        // Use the 0-th and 1-st channels
        int[] channels = { 0, 1 };
        //! [Using 50 bins for hue and 60 for saturation]
        //! [Calculate the histograms for the HSV images]
        // Normalize each histogram to [0, 1] so the comparison scores are
        // independent of image size.
        Mat histBase = new Mat(), histHalfDown = new Mat(), histTest1 = new Mat(), histTest2 = new Mat();
        List<Mat> hsvBaseList = Arrays.asList(hsvBase);
        Imgproc.calcHist(hsvBaseList, new MatOfInt(channels), new Mat(), histBase, new MatOfInt(histSize), new MatOfFloat(ranges), false);
        Core.normalize(histBase, histBase, 0, 1, Core.NORM_MINMAX);
        List<Mat> hsvHalfDownList = Arrays.asList(hsvHalfDown);
        Imgproc.calcHist(hsvHalfDownList, new MatOfInt(channels), new Mat(), histHalfDown, new MatOfInt(histSize), new MatOfFloat(ranges), false);
        Core.normalize(histHalfDown, histHalfDown, 0, 1, Core.NORM_MINMAX);
        List<Mat> hsvTest1List = Arrays.asList(hsvTest1);
        Imgproc.calcHist(hsvTest1List, new MatOfInt(channels), new Mat(), histTest1, new MatOfInt(histSize), new MatOfFloat(ranges), false);
        Core.normalize(histTest1, histTest1, 0, 1, Core.NORM_MINMAX);
        List<Mat> hsvTest2List = Arrays.asList(hsvTest2);
        Imgproc.calcHist(hsvTest2List, new MatOfInt(channels), new Mat(), histTest2, new MatOfInt(histSize), new MatOfFloat(ranges), false);
        Core.normalize(histTest2, histTest2, 0, 1, Core.NORM_MINMAX);
        //! [Calculate the histograms for the HSV images]
        //! [Apply the histogram comparison methods]
        // Compare the base histogram against itself (perfect score), the lower
        // half of itself, and the two test images with each of the 4 methods.
        for( int compareMethod = 0; compareMethod < 4; compareMethod++ ) {
            double baseBase = Imgproc.compareHist( histBase, histBase, compareMethod );
            double baseHalf = Imgproc.compareHist( histBase, histHalfDown, compareMethod );
            double baseTest1 = Imgproc.compareHist( histBase, histTest1, compareMethod );
            double baseTest2 = Imgproc.compareHist( histBase, histTest2, compareMethod );
            System.out.println("Method " + compareMethod + " Perfect, Base-Half, Base-Test(1), Base-Test(2) : " + baseBase + " / " + baseHalf
                    + " / " + baseTest1 + " / " + baseTest2);
        }
        //! [Apply the histogram comparison methods]
    }
}
public class CompareHistDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        CompareHist demo = new CompareHist();
        demo.run(args);
    }
}

@ -0,0 +1,49 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Histogram Equalization tutorial: loads an image, converts it to grayscale,
 * equalizes its histogram and shows the original next to the result.
 */
class EqualizeHist {
    public void run(String[] args) {
        //! [Load image]
        String filename = args.length > 0 ? args[0] : "../data/lena.jpg";
        Mat src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }
        //! [Load image]

        //! [Convert to grayscale]
        // equalizeHist works on single-channel 8-bit images only.
        Imgproc.cvtColor(src, src, Imgproc.COLOR_BGR2GRAY);
        //! [Convert to grayscale]

        //! [Apply Histogram Equalization]
        Mat dst = new Mat();
        Imgproc.equalizeHist(src, dst);
        //! [Apply Histogram Equalization]

        //! [Display results]
        HighGui.imshow("Source image", src);
        HighGui.imshow("Equalized Image", dst);
        //! [Display results]

        //! [Wait until user exits the program]
        HighGui.waitKey(0);
        //! [Wait until user exits the program]

        System.exit(0);
    }
}
public class EqualizeHistDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        EqualizeHist demo = new EqualizeHist();
        demo.run(args);
    }
}

@ -1,9 +1,8 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
@ -19,6 +18,7 @@ import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
@ -46,7 +46,7 @@ public class MorphologyDemo1 {
frame = new JFrame("Erosion and dilatation demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
BufferedImage img = toBufferedImage(matImgSrc);
Image img = HighGui.toBufferedImage(matImgSrc);
addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
@ -55,7 +55,7 @@ public class MorphologyDemo1 {
frame.setVisible(true);
}
private void addComponentsToPane(Container pane, BufferedImage img) {
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
@ -115,20 +115,6 @@ public class MorphologyDemo1 {
pane.add(imgLabel, BorderLayout.CENTER);
}
private BufferedImage toBufferedImage(Mat matrix) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if (matrix.channels() > 1) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = matrix.channels() * matrix.cols() * matrix.rows();
byte[] buffer = new byte[bufferSize];
matrix.get(0, 0, buffer); // get all the pixels
BufferedImage image = new BufferedImage(matrix.cols(), matrix.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(buffer, 0, targetPixels, 0, buffer.length);
return image;
}
private void update() {
Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1),
new Point(kernelSize, kernelSize));
@ -138,7 +124,7 @@ public class MorphologyDemo1 {
} else {
Imgproc.dilate(matImgSrc, matImgDst, element);
}
BufferedImage img = toBufferedImage(matImgDst);
Image img = HighGui.toBufferedImage(matImgDst);
imgLabel.setIcon(new ImageIcon(img));
frame.repaint();
}

@ -1,9 +1,8 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
@ -19,6 +18,7 @@ import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
@ -48,7 +48,7 @@ public class MorphologyDemo2 {
frame = new JFrame("Morphology Transformations demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
BufferedImage img = toBufferedImage(matImgSrc);
Image img = HighGui.toBufferedImage(matImgSrc);
addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
@ -57,7 +57,7 @@ public class MorphologyDemo2 {
frame.setVisible(true);
}
private void addComponentsToPane(Container pane, BufferedImage img) {
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
@ -117,26 +117,12 @@ public class MorphologyDemo2 {
pane.add(imgLabel, BorderLayout.CENTER);
}
private BufferedImage toBufferedImage(Mat matrix) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if (matrix.channels() > 1) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = matrix.channels() * matrix.cols() * matrix.rows();
byte[] buffer = new byte[bufferSize];
matrix.get(0, 0, buffer); // get all the pixels
BufferedImage image = new BufferedImage(matrix.cols(), matrix.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(buffer, 0, targetPixels, 0, buffer.length);
return image;
}
private void update() {
Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1),
new Point(kernelSize, kernelSize));
Imgproc.morphologyEx(matImgSrc, matImgDst, morphOpType, element);
BufferedImage img = toBufferedImage(matImgDst);
Image img = HighGui.toBufferedImage(matImgDst);
imgLabel.setIcon(new ImageIcon(img));
frame.repaint();
}

@ -1,16 +1,22 @@
import org.opencv.core.*;
import org.opencv.core.Point;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import javax.swing.*;
import java.awt.GridLayout;
import java.awt.Image;
import java.util.Hashtable;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.util.*;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class MatchTemplateDemoRun implements ChangeListener {
@ -26,9 +32,7 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [declare]
public void run(String[] args) {
if (args.length < 2)
{
if (args.length < 2) {
System.out.println("Not enough parameters");
System.out.println("Program arguments:\n<image_name> <template_name> [<mask_name>]");
System.exit(-1);
@ -45,19 +49,16 @@ class MatchTemplateDemoRun implements ChangeListener{
mask = Imgcodecs.imread(args[2], Imgcodecs.IMREAD_COLOR);
}
if(img.empty() || templ.empty() || (use_mask && mask.empty()))
{
if (img.empty() || templ.empty() || (use_mask && mask.empty())) {
System.out.println("Can't read one of the images");
System.exit(-1);
}
matchingMethod();
createJFrame();
}
private void matchingMethod() {
Mat result = new Mat();
//! [copy_source]
@ -76,12 +77,12 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [match_template]
/// Do the Matching and Normalize
Boolean method_accepts_mask = (Imgproc.TM_SQDIFF == match_method ||
match_method == Imgproc.TM_CCORR_NORMED);
if (use_mask && method_accepts_mask)
{ Imgproc.matchTemplate( img, templ, result, match_method, mask); }
else
{ Imgproc.matchTemplate( img, templ, result, match_method); }
Boolean method_accepts_mask = (Imgproc.TM_SQDIFF == match_method || match_method == Imgproc.TM_CCORR_NORMED);
if (use_mask && method_accepts_mask) {
Imgproc.matchTemplate(img, templ, result, match_method, mask);
} else {
Imgproc.matchTemplate(img, templ, result, match_method);
}
//! [match_template]
//! [normalize]
@ -90,7 +91,6 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [best_match]
/// Localizing the best match with minMaxLoc
double minVal; double maxVal;
Point matchLoc;
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
@ -98,55 +98,42 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [match_loc]
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values.
// For all the other methods, the higher the better
if( match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED )
{ matchLoc = mmr.minLoc; }
else
{ matchLoc = mmr.maxLoc; }
/// For all the other methods, the higher the better
if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
matchLoc = mmr.minLoc;
} else {
matchLoc = mmr.maxLoc;
}
//! [match_loc]
//! [imshow]
/// Show me what you got
Imgproc.rectangle(img_display, matchLoc, new Point(matchLoc.x + templ.cols(),
matchLoc.y + templ.rows()), new Scalar(0, 0, 0), 2, 8, 0);
Imgproc.rectangle(result, matchLoc, new Point(matchLoc.x + templ.cols(),
matchLoc.y + templ.rows()), new Scalar(0, 0, 0), 2, 8, 0);
Imgproc.rectangle(img_display, matchLoc, new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows()),
new Scalar(0, 0, 0), 2, 8, 0);
Imgproc.rectangle(result, matchLoc, new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows()),
new Scalar(0, 0, 0), 2, 8, 0);
Image tmpImg = toBufferedImage(img_display);
Image tmpImg = HighGui.toBufferedImage(img_display);
ImageIcon icon = new ImageIcon(tmpImg);
imgDisplay.setIcon(icon);
result.convertTo(result, CvType.CV_8UC1, 255.0);
tmpImg = toBufferedImage(result);
tmpImg = HighGui.toBufferedImage(result);
icon = new ImageIcon(tmpImg);
resultDisplay.setIcon(icon);
//! [imshow]
}
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
if (!source.getValueIsAdjusting()) {
match_method = (int)source.getValue();
match_method = source.getValue();
matchingMethod();
}
}
public Image toBufferedImage(Mat m) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if ( m.channels() > 1 ) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = m.channels()*m.cols()*m.rows();
byte [] b = new byte[bufferSize];
m.get(0,0,b); // get all the pixels
BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(b, 0, targetPixels, 0, b.length);
return image;
}
private void createJFrame() {
String title = "Source image; Control; Result image";
JFrame frame = new JFrame(title);
frame.setLayout(new GridLayout(2, 2));
@ -164,7 +151,7 @@ class MatchTemplateDemoRun implements ChangeListener{
slider.setMinorTickSpacing(1);
// Customizing the labels
Hashtable labelTable = new Hashtable();
Hashtable<Integer, JLabel> labelTable = new Hashtable<>();
labelTable.put(new Integer(0), new JLabel("0 - SQDIFF"));
labelTable.put(new Integer(1), new JLabel("1 - SQDIFF NORMED"));
labelTable.put(new Integer(2), new JLabel("2 - TM CCORR"));
@ -184,8 +171,7 @@ class MatchTemplateDemoRun implements ChangeListener{
}
}
public class MatchTemplateDemo
{
public class MatchTemplateDemo {
public static void main(String[] args) {
// load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

@ -0,0 +1,80 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
/**
 * Affine Transformations tutorial: warps an image with an affine transform
 * defined by three point correspondences, then rotates the warped result
 * about its center.
 */
class GeometricTransforms {
    public void run(String[] args) {
        //! [Load the image]
        String filename = args.length > 0 ? args[0] : "../data/lena.jpg";
        Mat src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }
        //! [Load the image]

        //! [Set your 3 points to calculate the Affine Transform]
        // Three reference corners of the source and where each one should land.
        Point[] srcTri = {
                new Point(0, 0),
                new Point(src.cols() - 1, 0),
                new Point(0, src.rows() - 1)
        };
        Point[] dstTri = {
                new Point(0, src.rows() * 0.33),
                new Point(src.cols() * 0.85, src.rows() * 0.25),
                new Point(src.cols() * 0.15, src.rows() * 0.7)
        };
        //! [Set your 3 points to calculate the Affine Transform]

        //! [Get the Affine Transform]
        Mat warpMat = Imgproc.getAffineTransform(new MatOfPoint2f(srcTri), new MatOfPoint2f(dstTri));
        //! [Get the Affine Transform]

        //! [Apply the Affine Transform just found to the src image]
        Mat warpDst = Mat.zeros(src.rows(), src.cols(), src.type());
        Imgproc.warpAffine(src, warpDst, warpMat, warpDst.size());
        //! [Apply the Affine Transform just found to the src image]

        /** Rotating the image after Warp */
        //! [Compute a rotation matrix with respect to the center of the image]
        Point center = new Point(warpDst.cols() / 2, warpDst.rows() / 2);
        double angle = -50.0;
        double scale = 0.6;
        //! [Compute a rotation matrix with respect to the center of the image]

        //! [Get the rotation matrix with the specifications above]
        Mat rotMat = Imgproc.getRotationMatrix2D(center, angle, scale);
        //! [Get the rotation matrix with the specifications above]

        //! [Rotate the warped image]
        Mat warpRotateDst = new Mat();
        Imgproc.warpAffine(warpDst, warpRotateDst, rotMat, warpDst.size());
        //! [Rotate the warped image]

        //! [Show what you got]
        HighGui.imshow("Source image", src);
        HighGui.imshow("Warp", warpDst);
        HighGui.imshow("Warp + Rotate", warpRotateDst);
        //! [Show what you got]

        //! [Wait until user exits the program]
        HighGui.waitKey(0);
        //! [Wait until user exits the program]

        System.exit(0);
    }
}
public class GeometricTransformsDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        GeometricTransforms demo = new GeometricTransforms();
        demo.run(args);
    }
}

@ -0,0 +1,71 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
def Hist_and_Backproj(val):
    """Trackbar callback: recompute the hue histogram with `val` bins,
    back-project it onto the global `hue` image, and display both the
    back-projection and the histogram bars.
    """
    ## [initialize]
    bins = val
    histSize = max(bins, 2)
    ranges = [0, 180] # hue_range
    ## [initialize]

    ## [Get the Histogram and normalize it]
    hist = cv.calcHist([hue], [0], None, [histSize], ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
    ## [Get the Histogram and normalize it]

    ## [Get Backprojection]
    backproj = cv.calcBackProject([hue], [0], hist, ranges, scale=1)
    ## [Get Backprojection]

    ## [Draw the backproj]
    cv.imshow('BackProj', backproj)
    ## [Draw the backproj]

    ## [Draw the histogram]
    w = 400
    h = 400
    bin_w = int(round(w / histSize))
    histImg = np.zeros((h, w, 3), dtype=np.uint8)

    for i in range(bins):
        # hist has shape (histSize, 1): extract the scalar explicitly —
        # round() on a size-1 ndarray raises on modern NumPy.
        bar_height = int(round(hist[i].item() * h / 255.0))
        cv.rectangle(histImg, (i*bin_w, h), ((i+1)*bin_w, h - bar_height), (0, 0, 255), cv.FILLED)

    cv.imshow('Histogram', histImg)
    ## [Draw the histogram]
## [Read the image]
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Read the image]

## [Transform it to HSV]
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
## [Transform it to HSV]

## [Use only the Hue value]
# Copy channel 0 (hue) of hsv into channel 0 of the destination buffer.
ch = (0, 0)
hue = np.empty(hsv.shape, hsv.dtype)
cv.mixChannels([hsv], [hue], ch)
## [Use only the Hue value]

## [Create Trackbar to enter the number of bins]
window_image = 'Source image'
cv.namedWindow(window_image)
bins = 25
cv.createTrackbar('* Hue bins: ', window_image, bins, 180, Hist_and_Backproj )
Hist_and_Backproj(bins)
## [Create Trackbar to enter the number of bins]

## [Show the image]
cv.imshow(window_image, src)
cv.waitKey()
## [Show the image]

@ -0,0 +1,79 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
# Flood-fill thresholds; the trackbar callbacks keep them up to date.
low = 20
up = 20

def callback_low(val):
    """Trackbar handler: store the new lower flood-fill threshold."""
    global low
    low = val
def callback_up(val):
    """Trackbar handler: store the new upper flood-fill threshold."""
    global up
    up = val
def pickPoint(event, x, y, flags, param):
    """Mouse handler: flood-fill from the clicked pixel, show the resulting
    mask and back-project the histogram computed over that mask.
    NOTE(review): floodFill is called without FLOODFILL_MASK_ONLY removed, but
    src is still passed first, so src itself is not recolored thanks to the
    MASK_ONLY flag below.
    """
    if event != cv.EVENT_LBUTTONDOWN:
        return

    # Fill and get the mask. The flood-fill mask must be 2 px larger than src.
    seed = (x, y)
    newMaskVal = 255
    newVal = (120, 120, 120)
    connectivity = 8
    flags = connectivity + (newMaskVal << 8 ) + cv.FLOODFILL_FIXED_RANGE + cv.FLOODFILL_MASK_ONLY
    mask2 = np.zeros((src.shape[0] + 2, src.shape[1] + 2), dtype=np.uint8)

    print('low:', low, 'up:', up)
    cv.floodFill(src, mask2, seed, newVal, (low, low, low), (up, up, up), flags)
    mask = mask2[1:-1, 1:-1]  # trim the 1-px border back off

    cv.imshow('Mask', mask)
    Hist_and_Backproj(mask)
def Hist_and_Backproj(mask):
    """Compute the H-S histogram of the global `hsv` image restricted to
    `mask`, then back-project it over the whole image and display it."""
    # 30 hue bins over [0, 180) and 32 saturation bins over [0, 256)
    h_bins = 30
    s_bins = 32
    histSize = [h_bins, s_bins]
    ranges = [0, 180] + [0, 256]  # hue range followed by saturation range
    channels = [0, 1]

    # Get the Histogram and normalize it
    hist = cv.calcHist([hsv], channels, mask, histSize, ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)

    # Get Backprojection
    backproj = cv.calcBackProject([hsv], channels, hist, ranges, scale=1)

    # Draw the backproj
    cv.imshow('BackProj', backproj)
# Read the image
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Transform it to HSV
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)

# Show the image
window_image = 'Source image'
cv.namedWindow(window_image)
cv.imshow(window_image, src)

# Set Trackbars for floodfill thresholds
cv.createTrackbar('Low thresh', window_image, low, 255, callback_low)
cv.createTrackbar('High thresh', window_image, up, 255, callback_up)
# Set a Mouse Callback
cv.setMouseCallback(window_image, pickPoint)

cv.waitKey()

@ -0,0 +1,71 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load image]

## [Separate the image in 3 places ( B, G and R )]
bgr_planes = cv.split(src)
## [Separate the image in 3 places ( B, G and R )]

## [Establish the number of bins]
histSize = 256
## [Establish the number of bins]

## [Set the ranges ( for B,G,R) )]
histRange = (0, 256) # the upper boundary is exclusive
## [Set the ranges ( for B,G,R) )]

## [Set histogram param]
accumulate = False
## [Set histogram param]

## [Compute the histograms]
b_hist = cv.calcHist(bgr_planes, [0], None, [histSize], histRange, accumulate=accumulate)
g_hist = cv.calcHist(bgr_planes, [1], None, [histSize], histRange, accumulate=accumulate)
r_hist = cv.calcHist(bgr_planes, [2], None, [histSize], histRange, accumulate=accumulate)
## [Compute the histograms]

## [Draw the histograms for B, G and R]
hist_w = 512
hist_h = 400
bin_w = int(round( hist_w/histSize ))

histImage = np.zeros((hist_h, hist_w, 3), dtype=np.uint8)
## [Draw the histograms for B, G and R]

## [Normalize the result to ( 0, histImage.rows )]
cv.normalize(b_hist, b_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(g_hist, g_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(r_hist, r_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
## [Normalize the result to ( 0, histImage.rows )]

## [Draw for each channel]
for i in range(1, histSize):
    # Each *_hist has shape (histSize, 1): extract the scalar with .item() —
    # round() on a size-1 ndarray raises on modern NumPy.
    b0, b1 = int(round(b_hist[i - 1].item())), int(round(b_hist[i].item()))
    g0, g1 = int(round(g_hist[i - 1].item())), int(round(g_hist[i].item()))
    r0, r1 = int(round(r_hist[i - 1].item())), int(round(r_hist[i].item()))
    cv.line(histImage, ( bin_w*(i-1), hist_h - b0 ), ( bin_w*(i), hist_h - b1 ),
            ( 255, 0, 0), thickness=2)
    cv.line(histImage, ( bin_w*(i-1), hist_h - g0 ), ( bin_w*(i), hist_h - g1 ),
            ( 0, 255, 0), thickness=2)
    cv.line(histImage, ( bin_w*(i-1), hist_h - r0 ), ( bin_w*(i), hist_h - r1 ),
            ( 0, 0, 255), thickness=2)
## [Draw for each channel]

## [Display]
cv.imshow('Source image', src)
cv.imshow('calcHist Demo', histImage)
cv.waitKey()
## [Display]

@ -0,0 +1,69 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load three images with different environment settings]
parser = argparse.ArgumentParser(description='Code for Histogram Comparison tutorial.')
parser.add_argument('--input1', help='Path to input image 1.')
parser.add_argument('--input2', help='Path to input image 2.')
parser.add_argument('--input3', help='Path to input image 3.')
args = parser.parse_args()

src_base = cv.imread(args.input1)
src_test1 = cv.imread(args.input2)
src_test2 = cv.imread(args.input3)
if src_base is None or src_test1 is None or src_test2 is None:
    print('Could not open or find the images!')
    exit(0)
## [Load three images with different environment settings]

## [Convert to HSV]
hsv_base = cv.cvtColor(src_base, cv.COLOR_BGR2HSV)
hsv_test1 = cv.cvtColor(src_test1, cv.COLOR_BGR2HSV)
hsv_test2 = cv.cvtColor(src_test2, cv.COLOR_BGR2HSV)
## [Convert to HSV]

## [Convert to HSV half]
hsv_half_down = hsv_base[hsv_base.shape[0]//2:,:]
## [Convert to HSV half]

## [Using 50 bins for hue and 60 for saturation]
h_bins = 50
s_bins = 60
histSize = [h_bins, s_bins]
# hue varies from 0 to 179, saturation from 0 to 255
h_ranges = [0, 180]
s_ranges = [0, 256]
ranges = h_ranges + s_ranges # concat lists
# Use the 0-th and 1-st channels
channels = [0, 1]
## [Using 50 bins for hue and 60 for saturation]

## [Calculate the histograms for the HSV images]
def normalized_hist(img):
    # 2-D hue/saturation histogram, min-max scaled to [0, 1] for comparison.
    hist = cv.calcHist([img], channels, None, histSize, ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
    return hist

hist_base = normalized_hist(hsv_base)
hist_half_down = normalized_hist(hsv_half_down)
hist_test1 = normalized_hist(hsv_test1)
hist_test2 = normalized_hist(hsv_test2)
## [Calculate the histograms for the HSV images]

## [Apply the histogram comparison methods]
for compare_method in range(4):
    base_base = cv.compareHist(hist_base, hist_base, compare_method)
    base_half = cv.compareHist(hist_base, hist_half_down, compare_method)
    base_test1 = cv.compareHist(hist_base, hist_test1, compare_method)
    base_test2 = cv.compareHist(hist_base, hist_test2, compare_method)

    print('Method:', compare_method, 'Perfect, Base-Half, Base-Test(1), Base-Test(2) :',\
          base_base, '/', base_half, '/', base_test1, '/', base_test2)
## [Apply the histogram comparison methods]

@ -0,0 +1,31 @@
from __future__ import print_function
import cv2 as cv
import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load image]

## [Convert to grayscale]
# equalizeHist operates on single-channel 8-bit images only.
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [Convert to grayscale]

## [Apply Histogram Equalization]
dst = cv.equalizeHist(src)
## [Apply Histogram Equalization]

## [Display results]
cv.imshow('Source image', src)
cv.imshow('Equalized Image', dst)
## [Display results]

## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]

@ -0,0 +1,54 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
## [Load the image]
parser = argparse.ArgumentParser(description='Code for Affine Transformations tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load the image]

## [Set your 3 points to calculate the Affine Transform]
# Points are (x, y): x comes from shape[1] (width), y from shape[0] (height).
srcTri = np.array( [[0, 0], [src.shape[1] - 1, 0], [0, src.shape[0] - 1]] ).astype(np.float32)
# Fixed: the first destination y-coordinate must use shape[0] (rows), as in
# the C++/Java versions of this tutorial (src.rows * 0.33).
dstTri = np.array( [[0, src.shape[0]*0.33], [src.shape[1]*0.85, src.shape[0]*0.25], [src.shape[1]*0.15, src.shape[0]*0.7]] ).astype(np.float32)
## [Set your 3 points to calculate the Affine Transform]

## [Get the Affine Transform]
warp_mat = cv.getAffineTransform(srcTri, dstTri)
## [Get the Affine Transform]

## [Apply the Affine Transform just found to the src image]
warp_dst = cv.warpAffine(src, warp_mat, (src.shape[1], src.shape[0]))
## [Apply the Affine Transform just found to the src image]

# Rotating the image after Warp

## [Compute a rotation matrix with respect to the center of the image]
center = (warp_dst.shape[1]//2, warp_dst.shape[0]//2)
angle = -50
scale = 0.6
## [Compute a rotation matrix with respect to the center of the image]

## [Get the rotation matrix with the specifications above]
rot_mat = cv.getRotationMatrix2D( center, angle, scale )
## [Get the rotation matrix with the specifications above]

## [Rotate the warped image]
warp_rotate_dst = cv.warpAffine(warp_dst, rot_mat, (warp_dst.shape[1], warp_dst.shape[0]))
## [Rotate the warped image]

## [Show what you got]
cv.imshow('Source image', src)
cv.imshow('Warp', warp_dst)
cv.imshow('Warp + Rotate', warp_rotate_dst)
## [Show what you got]

## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]
Loading…
Cancel
Save