Merge pull request #11598 from catree:add_tutorial_features2d_java_python
@@ -1,32 +0,0 @@
Detecting corners location in subpixeles {#tutorial_corner_subpixeles}
========================================

Goal
----

In this tutorial you will learn how to:

- Use the OpenCV function @ref cv::cornerSubPix to find more exact corner positions (more exact
  than integer pixels).

Theory
------

Code
----

This tutorial code's is shown lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp

Explanation
-----------

Result
------



Here is the result:


@@ -0,0 +1,46 @@
Detecting corner locations in subpixels {#tutorial_corner_subpixels}
=======================================

Goal
----

In this tutorial you will learn how to:

- Use the OpenCV function @ref cv::cornerSubPix to find more exact corner positions (more exact
  than integer pixels).

Theory
------

Code
----

@add_toggle_cpp
The tutorial code is shown in the lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
@end_toggle

@add_toggle_java
The tutorial code is shown in the lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
@include samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
@end_toggle

@add_toggle_python
The tutorial code is shown in the lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
@include samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
@end_toggle

Explanation
-----------

Result
------



Here is the result:


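Editor's note: the Theory section of the new tutorial is left empty, so a brief sketch of the underlying idea may help here. cv::cornerSubPix refines an integer corner estimate by using the fact that, at a true corner q, every image gradient \(\nabla I(p)\) at a point p in a window W around q is orthogonal to the vector q − p. Summing the resulting conditions gives a small linear system,

$$\left( \sum_{p \in W} \nabla I(p)\, \nabla I(p)^\top \right) q \;=\; \sum_{p \in W} \nabla I(p)\, \nabla I(p)^\top\, p,$$

which is solved iteratively (re-centering the window on each new estimate) until the supplied TermCriteria (iteration count and/or epsilon) is satisfied.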
@@ -0,0 +1,60 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;

const char* keys =
    "{ help h |                  | Print help message. }"
    "{ input1 | ../data/box.png  | Path to input image 1. }"
    "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";

int main( int argc, char* argv[] )
{
    CommandLineParser parser( argc, argv, keys );
    Mat img1 = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
    Mat img2 = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
    if ( img1.empty() || img2.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        parser.printMessage();
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
    detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );

    //-- Step 2: Matching descriptor vectors with a brute force matcher
    // Since SURF is a floating-point descriptor NORM_L2 is used
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
    std::vector< DMatch > matches;
    matcher->match( descriptors1, descriptors2, matches );

    //-- Draw matches
    Mat img_matches;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, img_matches );

    //-- Show detected matches
    imshow("Matches", img_matches );

    waitKey();
    return 0;
}
#else
int main()
{
    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
    return 0;
}
#endif
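Editor's note: the BRUTEFORCE matcher above pairs each descriptor in the first image with its nearest neighbour in the second under the Euclidean metric. Since SURF descriptors are floating-point vectors (64 or 128 dimensions), the relevant distance is

$$d(a, b) = \lVert a - b \rVert_2 = \sqrt{\sum_i (a_i - b_i)^2},$$

which is what NORM_L2 denotes; binary descriptors such as ORB would use NORM_HAMMING instead.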
@@ -0,0 +1,46 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;

int main( int argc, char* argv[] )
{
    CommandLineParser parser( argc, argv, "{@input | ../data/box.png | input image}" );
    Mat src = imread( parser.get<String>( "@input" ), IMREAD_GRAYSCALE );
    if ( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints;
    detector->detect( src, keypoints );

    //-- Draw keypoints
    Mat img_keypoints;
    drawKeypoints( src, keypoints, img_keypoints );

    //-- Show detected (drawn) keypoints
    imshow("SURF Keypoints", img_keypoints );

    waitKey();
    return 0;
}
#else
int main()
{
    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
    return 0;
}
#endif
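Editor's note: the same detection step in Python, as a minimal sketch. It assumes an opencv-contrib build that exposes cv.xfeatures2d (SURF is a non-free algorithm); the image path is a placeholder.

```python
import cv2 as cv

# Assumption: OpenCV was built with the xfeatures2d contrib module (non-free).
src = cv.imread('box.png', cv.IMREAD_GRAYSCALE)  # placeholder path

surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
keypoints = surf.detect(src, None)

# Draw and display the detected keypoints.
img_keypoints = cv.drawKeypoints(src, keypoints, None)
cv.imshow('SURF Keypoints', img_keypoints)
cv.waitKey()
```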
@@ -0,0 +1,72 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;

const char* keys =
    "{ help h |                  | Print help message. }"
    "{ input1 | ../data/box.png  | Path to input image 1. }"
    "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";

int main( int argc, char* argv[] )
{
    CommandLineParser parser( argc, argv, keys );
    Mat img1 = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
    Mat img2 = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
    if ( img1.empty() || img2.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        parser.printMessage();
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
    detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );

    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
    // Since SURF is a floating-point descriptor NORM_L2 is used
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<DMatch> > knn_matches;
    matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );

    //-- Filter matches using the Lowe's ratio test
    const float ratio_thresh = 0.7f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i].size() > 1 && knn_matches[i][0].distance / knn_matches[i][1].distance <= ratio_thresh)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    //-- Draw matches
    Mat img_matches;
    drawMatches( img1, keypoints1, img2, keypoints2, good_matches, img_matches, Scalar::all(-1),
                 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Show detected matches
    imshow("Good Matches", img_matches );

    waitKey();
    return 0;
}
#else
int main()
{
    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
    return 0;
}
#endif
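Editor's note: the filter applied above is Lowe's ratio test. A match \(m_1\) with nearest-neighbour distance \(d(m_1)\) is kept only if it is sufficiently better than the second-best candidate \(m_2\),

$$\frac{d(m_1)}{d(m_2)} \le \tau, \qquad \tau = 0.7 \text{ in this sample},$$

which discards ambiguous descriptors that have several near-equally-good counterparts in the other image.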
@@ -0,0 +1,107 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;

const char* keys =
    "{ help h |                  | Print help message. }"
    "{ input1 | ../data/box.png  | Path to input image 1. }"
    "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";

int main( int argc, char* argv[] )
{
    CommandLineParser parser( argc, argv, keys );
    Mat img_object = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
    Mat img_scene = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
    if ( img_object.empty() || img_scene.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        parser.printMessage();
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute( img_object, noArray(), keypoints_object, descriptors_object );
    detector->detectAndCompute( img_scene, noArray(), keypoints_scene, descriptors_scene );

    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
    // Since SURF is a floating-point descriptor NORM_L2 is used
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<DMatch> > knn_matches;
    matcher->knnMatch( descriptors_object, descriptors_scene, knn_matches, 2 );

    //-- Filter matches using the Lowe's ratio test
    const float ratio_thresh = 0.75f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i].size() > 1 && knn_matches[i][0].distance / knn_matches[i][1].distance <= ratio_thresh)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    //-- Draw matches
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
                 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    Mat H = findHomography( obj, scene, RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f( (float)img_object.cols, 0 );
    obj_corners[2] = Point2f( (float)img_object.cols, (float)img_object.rows );
    obj_corners[3] = Point2f( 0, (float)img_object.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f((float)img_object.cols, 0),
          scene_corners[1] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f((float)img_object.cols, 0),
          scene_corners[2] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f((float)img_object.cols, 0),
          scene_corners[3] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f((float)img_object.cols, 0),
          scene_corners[0] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow("Good Matches & Object detection", img_matches );

    waitKey();
    return 0;
}
#else
int main()
{
    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
    return 0;
}
#endif
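Editor's note: this diff does not include a Python version of the homography demo. A minimal sketch of the same pipeline, under the same assumptions (opencv-contrib build exposing cv.xfeatures2d, placeholder image paths), might look like:

```python
import numpy as np
import cv2 as cv

# Assumption: OpenCV built with the xfeatures2d contrib module; paths are placeholders.
img_object = cv.imread('box.png', cv.IMREAD_GRAYSCALE)
img_scene = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)

# Detect keypoints and compute SURF descriptors.
surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
kp_obj, des_obj = surf.detectAndCompute(img_object, None)
kp_scene, des_scene = surf.detectAndCompute(img_scene, None)

# FLANN-based kNN matching followed by Lowe's ratio test.
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn = matcher.knnMatch(des_obj, des_scene, 2)
good = [p[0] for p in knn if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]

# Estimate the object-to-scene homography from the good matches with RANSAC.
obj_pts = np.float32([kp_obj[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
scene_pts = np.float32([kp_scene[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
H, _ = cv.findHomography(obj_pts, scene_pts, cv.RANSAC, 3.0)

# Project the object corners into the scene to localize the object.
h, w = img_object.shape
obj_corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
scene_corners = cv.perspectiveTransform(obj_corners, H)
```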
@@ -0,0 +1,158 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.util.Random;

import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class CornerSubPix {
    private Mat src = new Mat();
    private Mat srcGray = new Mat();
    private JFrame frame;
    private JLabel imgLabel;
    private static final int MAX_CORNERS = 25;
    private int maxCorners = 10;
    private Random rng = new Random(12345);

    public CornerSubPix(String[] args) {
        /// Load source image and convert it to gray
        String filename = args.length > 0 ? args[0] : "../data/pic3.png";
        src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);

        // Create and set up the window.
        frame = new JFrame("Shi-Tomasi corner detector demo");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Set up the content pane.
        Image img = HighGui.toBufferedImage(src);
        addComponentsToPane(frame.getContentPane(), img);
        // Use the content pane's default BorderLayout. No need for
        // setLayout(new BorderLayout());
        // Display the window.
        frame.pack();
        frame.setVisible(true);
        update();
    }

    private void addComponentsToPane(Container pane, Image img) {
        if (!(pane.getLayout() instanceof BorderLayout)) {
            pane.add(new JLabel("Container doesn't use BorderLayout!"));
            return;
        }

        JPanel sliderPanel = new JPanel();
        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));

        sliderPanel.add(new JLabel("Max corners:"));
        JSlider slider = new JSlider(0, MAX_CORNERS, maxCorners);
        slider.setMajorTickSpacing(20);
        slider.setMinorTickSpacing(10);
        slider.setPaintTicks(true);
        slider.setPaintLabels(true);
        slider.addChangeListener(new ChangeListener() {
            @Override
            public void stateChanged(ChangeEvent e) {
                JSlider source = (JSlider) e.getSource();
                maxCorners = source.getValue();
                update();
            }
        });
        sliderPanel.add(slider);
        pane.add(sliderPanel, BorderLayout.PAGE_START);

        imgLabel = new JLabel(new ImageIcon(img));
        pane.add(imgLabel, BorderLayout.CENTER);
    }

    private void update() {
        /// Parameters for Shi-Tomasi algorithm
        maxCorners = Math.max(maxCorners, 1);
        MatOfPoint corners = new MatOfPoint();
        double qualityLevel = 0.01;
        double minDistance = 10;
        int blockSize = 3, gradientSize = 3;
        boolean useHarrisDetector = false;
        double k = 0.04;

        /// Copy the source image
        Mat copy = src.clone();

        /// Apply corner detection
        Imgproc.goodFeaturesToTrack(srcGray, corners, maxCorners, qualityLevel, minDistance, new Mat(),
                blockSize, gradientSize, useHarrisDetector, k);

        /// Draw corners detected
        System.out.println("** Number of corners detected: " + corners.rows());
        int[] cornersData = new int[(int) (corners.total() * corners.channels())];
        corners.get(0, 0, cornersData);
        int radius = 4;
        Mat matCorners = new Mat(corners.rows(), 2, CvType.CV_32F);
        float[] matCornersData = new float[(int) (matCorners.total() * matCorners.channels())];
        matCorners.get(0, 0, matCornersData);
        for (int i = 0; i < corners.rows(); i++) {
            Imgproc.circle(copy, new Point(cornersData[i * 2], cornersData[i * 2 + 1]), radius,
                    new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
            matCornersData[i * 2] = cornersData[i * 2];
            matCornersData[i * 2 + 1] = cornersData[i * 2 + 1];
        }
        matCorners.put(0, 0, matCornersData);

        imgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(copy)));
        frame.repaint();

        /// Set the needed parameters to find the refined corners
        Size winSize = new Size(5, 5);
        Size zeroZone = new Size(-1, -1);
        TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 40, 0.001);

        /// Calculate the refined corner locations
        Imgproc.cornerSubPix(srcGray, matCorners, winSize, zeroZone, criteria);

        /// Write them down
        matCorners.get(0, 0, matCornersData);
        for (int i = 0; i < corners.rows(); i++) {
            System.out.println(
                    " -- Refined Corner [" + i + "] (" + matCornersData[i * 2] + "," + matCornersData[i * 2 + 1] + ")");
        }
    }
}

public class CornerSubPixDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Schedule a job for the event dispatch thread:
        // creating and showing this application's GUI.
        javax.swing.SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                new CornerSubPix(args);
            }
        });
    }
}
@@ -0,0 +1,190 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.util.Random;

import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;

import org.opencv.core.Core;
import org.opencv.core.Core.MinMaxLocResult;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class CornerDetector {
    private Mat src = new Mat();
    private Mat srcGray = new Mat();
    private Mat harrisDst = new Mat();
    private Mat shiTomasiDst = new Mat();
    private Mat harrisCopy = new Mat();
    private Mat shiTomasiCopy = new Mat();
    private Mat Mc = new Mat();
    private JFrame frame;
    private JLabel harrisImgLabel;
    private JLabel shiTomasiImgLabel;
    private static final int MAX_QUALITY_LEVEL = 100;
    private int qualityLevel = 50;
    private double harrisMinVal;
    private double harrisMaxVal;
    private double shiTomasiMinVal;
    private double shiTomasiMaxVal;
    private Random rng = new Random(12345);

    public CornerDetector(String[] args) {
        /// Load source image and convert it to gray
        String filename = args.length > 0 ? args[0] : "../data/building.jpg";
        src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);

        // Create and set up the window.
        frame = new JFrame("Creating your own corner detector demo");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Set up the content pane.
        Image img = HighGui.toBufferedImage(src);
        addComponentsToPane(frame.getContentPane(), img);
        // Use the content pane's default BorderLayout. No need for
        // setLayout(new BorderLayout());
        // Display the window.
        frame.pack();
        frame.setVisible(true);

        /// Set some parameters
        int blockSize = 3, apertureSize = 3;

        /// My Harris matrix -- Using cornerEigenValsAndVecs
        Imgproc.cornerEigenValsAndVecs(srcGray, harrisDst, blockSize, apertureSize);

        /* calculate Mc */
        Mc = Mat.zeros(srcGray.size(), CvType.CV_32F);

        float[] harrisData = new float[(int) (harrisDst.total() * harrisDst.channels())];
        harrisDst.get(0, 0, harrisData);
        float[] McData = new float[(int) (Mc.total() * Mc.channels())];
        Mc.get(0, 0, McData);

        for( int i = 0; i < srcGray.rows(); i++ ) {
            for( int j = 0; j < srcGray.cols(); j++ ) {
                float lambda1 = harrisData[(i*srcGray.cols() + j) * 6];
                float lambda2 = harrisData[(i*srcGray.cols() + j) * 6 + 1];
                McData[i*srcGray.cols()+j] = (float) (lambda1*lambda2 - 0.04f*Math.pow( ( lambda1 + lambda2 ), 2 ));
            }
        }
        Mc.put(0, 0, McData);

        MinMaxLocResult res = Core.minMaxLoc(Mc);
        harrisMinVal = res.minVal;
        harrisMaxVal = res.maxVal;

        /// My Shi-Tomasi -- Using cornerMinEigenVal
        Imgproc.cornerMinEigenVal(srcGray, shiTomasiDst, blockSize, apertureSize);
        res = Core.minMaxLoc(shiTomasiDst);
        shiTomasiMinVal = res.minVal;
        shiTomasiMaxVal = res.maxVal;

        update();
    }

    private void addComponentsToPane(Container pane, Image img) {
        if (!(pane.getLayout() instanceof BorderLayout)) {
            pane.add(new JLabel("Container doesn't use BorderLayout!"));
            return;
        }

        JPanel sliderPanel = new JPanel();
        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));

        sliderPanel.add(new JLabel("Max corners:"));
        JSlider slider = new JSlider(0, MAX_QUALITY_LEVEL, qualityLevel);
        slider.setMajorTickSpacing(20);
        slider.setMinorTickSpacing(10);
        slider.setPaintTicks(true);
        slider.setPaintLabels(true);
        slider.addChangeListener(new ChangeListener() {
            @Override
            public void stateChanged(ChangeEvent e) {
                JSlider source = (JSlider) e.getSource();
                qualityLevel = source.getValue();
                update();
            }
        });
        sliderPanel.add(slider);
        pane.add(sliderPanel, BorderLayout.PAGE_START);

        JPanel imgPanel = new JPanel();
        harrisImgLabel = new JLabel(new ImageIcon(img));
        shiTomasiImgLabel = new JLabel(new ImageIcon(img));
        imgPanel.add(harrisImgLabel);
        imgPanel.add(shiTomasiImgLabel);
        pane.add(imgPanel, BorderLayout.CENTER);
    }

    private void update() {
        int qualityLevelVal = Math.max(qualityLevel, 1);

        //Harris
        harrisCopy = src.clone();

        float[] McData = new float[(int) (Mc.total() * Mc.channels())];
        Mc.get(0, 0, McData);
        for (int i = 0; i < srcGray.rows(); i++) {
            for (int j = 0; j < srcGray.cols(); j++) {
                if (McData[i * srcGray.cols() + j] > harrisMinVal
                        + (harrisMaxVal - harrisMinVal) * qualityLevelVal / MAX_QUALITY_LEVEL) {
                    Imgproc.circle(harrisCopy, new Point(j, i), 4,
                            new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
                }
            }
        }

        //Shi-Tomasi
        shiTomasiCopy = src.clone();

        float[] shiTomasiData = new float[(int) (shiTomasiDst.total() * shiTomasiDst.channels())];
        shiTomasiDst.get(0, 0, shiTomasiData);
        for (int i = 0; i < srcGray.rows(); i++) {
            for (int j = 0; j < srcGray.cols(); j++) {
                if (shiTomasiData[i * srcGray.cols() + j] > shiTomasiMinVal
                        + (shiTomasiMaxVal - shiTomasiMinVal) * qualityLevelVal / MAX_QUALITY_LEVEL) {
                    Imgproc.circle(shiTomasiCopy, new Point(j, i), 4,
                            new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
                }
            }
        }

        harrisImgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(harrisCopy)));
        shiTomasiImgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(shiTomasiCopy)));
        frame.repaint();
    }
}

public class CornerDetectorDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Schedule a job for the event dispatch thread:
        // creating and showing this application's GUI.
        javax.swing.SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                new CornerDetector(args);
            }
        });
    }
}
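Editor's note: the quantity Mc computed in the constructor above is the Harris corner response expressed through the eigenvalues \(\lambda_1, \lambda_2\) of the per-pixel structure tensor returned by cornerEigenValsAndVecs (a 6-channel map holding \(\lambda_1\), \(\lambda_2\) and the two eigenvectors, hence the stride of 6 in the indexing):

$$M_c = \lambda_1 \lambda_2 - k\,(\lambda_1 + \lambda_2)^2, \qquad k = 0.04,$$

while the Shi-Tomasi measure produced by cornerMinEigenVal is simply

$$R = \min(\lambda_1, \lambda_2).$$

A pixel is drawn as a corner when its response exceeds \(\min + (\max - \min) \cdot \text{qualityLevel} / 100\), which is what both loops in update() implement.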
@@ -0,0 +1,134 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.util.Random;

import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class GoodFeaturesToTrack {
    private Mat src = new Mat();
    private Mat srcGray = new Mat();
    private JFrame frame;
    private JLabel imgLabel;
    private static final int MAX_THRESHOLD = 100;
    private int maxCorners = 23;
    private Random rng = new Random(12345);

    public GoodFeaturesToTrack(String[] args) {
        /// Load source image and convert it to gray
        String filename = args.length > 0 ? args[0] : "../data/pic3.png";
        src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);

        // Create and set up the window.
        frame = new JFrame("Shi-Tomasi corner detector demo");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Set up the content pane.
        Image img = HighGui.toBufferedImage(src);
        addComponentsToPane(frame.getContentPane(), img);
        // Use the content pane's default BorderLayout. No need for
        // setLayout(new BorderLayout());
        // Display the window.
        frame.pack();
        frame.setVisible(true);
        update();
    }

    private void addComponentsToPane(Container pane, Image img) {
        if (!(pane.getLayout() instanceof BorderLayout)) {
            pane.add(new JLabel("Container doesn't use BorderLayout!"));
            return;
        }

        JPanel sliderPanel = new JPanel();
        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));

        sliderPanel.add(new JLabel("Max corners:"));
        JSlider slider = new JSlider(0, MAX_THRESHOLD, maxCorners);
        slider.setMajorTickSpacing(20);
        slider.setMinorTickSpacing(10);
        slider.setPaintTicks(true);
        slider.setPaintLabels(true);
        slider.addChangeListener(new ChangeListener() {
            @Override
            public void stateChanged(ChangeEvent e) {
                JSlider source = (JSlider) e.getSource();
                maxCorners = source.getValue();
                update();
            }
        });
        sliderPanel.add(slider);
        pane.add(sliderPanel, BorderLayout.PAGE_START);

        imgLabel = new JLabel(new ImageIcon(img));
        pane.add(imgLabel, BorderLayout.CENTER);
    }

    private void update() {
        /// Parameters for Shi-Tomasi algorithm
        maxCorners = Math.max(maxCorners, 1);
        MatOfPoint corners = new MatOfPoint();
        double qualityLevel = 0.01;
        double minDistance = 10;
        int blockSize = 3, gradientSize = 3;
        boolean useHarrisDetector = false;
        double k = 0.04;

        /// Copy the source image
        Mat copy = src.clone();

        /// Apply corner detection
        Imgproc.goodFeaturesToTrack(srcGray, corners, maxCorners, qualityLevel, minDistance, new Mat(),
                blockSize, gradientSize, useHarrisDetector, k);

        /// Draw corners detected
        System.out.println("** Number of corners detected: " + corners.rows());
        int[] cornersData = new int[(int) (corners.total() * corners.channels())];
        corners.get(0, 0, cornersData);
        int radius = 4;
        for (int i = 0; i < corners.rows(); i++) {
            Imgproc.circle(copy, new Point(cornersData[i * 2], cornersData[i * 2 + 1]), radius,
                    new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
        }

        imgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(copy)));
        frame.repaint();
    }
}

public class GoodFeaturesToTrackDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Schedule a job for the event dispatch thread:
        // creating and showing this application's GUI.
        javax.swing.SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                new GoodFeaturesToTrack(args);
            }
        });
    }
}
@@ -0,0 +1,142 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;

import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class CornerHarris {
    private Mat srcGray = new Mat();
    private Mat dst = new Mat();
    private Mat dstNorm = new Mat();
    private Mat dstNormScaled = new Mat();
    private JFrame frame;
    private JLabel imgLabel;
    private JLabel cornerLabel;
    private static final int MAX_THRESHOLD = 255;
    private int threshold = 200;

    public CornerHarris(String[] args) {
        /// Load source image and convert it to gray
        String filename = args.length > 0 ? args[0] : "../data/building.jpg";
        Mat src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);

        // Create and set up the window.
        frame = new JFrame("Harris corner detector demo");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Set up the content pane.
        Image img = HighGui.toBufferedImage(src);
        addComponentsToPane(frame.getContentPane(), img);
        // Use the content pane's default BorderLayout. No need for
        // setLayout(new BorderLayout());
        // Display the window.
        frame.pack();
        frame.setVisible(true);
        update();
    }

    private void addComponentsToPane(Container pane, Image img) {
        if (!(pane.getLayout() instanceof BorderLayout)) {
            pane.add(new JLabel("Container doesn't use BorderLayout!"));
            return;
        }

        JPanel sliderPanel = new JPanel();
        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));

        sliderPanel.add(new JLabel("Threshold: "));
        JSlider slider = new JSlider(0, MAX_THRESHOLD, threshold);
        slider.setMajorTickSpacing(20);
        slider.setMinorTickSpacing(10);
        slider.setPaintTicks(true);
        slider.setPaintLabels(true);
        slider.addChangeListener(new ChangeListener() {
            @Override
            public void stateChanged(ChangeEvent e) {
                JSlider source = (JSlider) e.getSource();
                threshold = source.getValue();
                update();
            }
        });
        sliderPanel.add(slider);
        pane.add(sliderPanel, BorderLayout.PAGE_START);

        JPanel imgPanel = new JPanel();
        imgLabel = new JLabel(new ImageIcon(img));
        imgPanel.add(imgLabel);

        Mat blackImg = Mat.zeros(srcGray.size(), CvType.CV_8U);
        cornerLabel = new JLabel(new ImageIcon(HighGui.toBufferedImage(blackImg)));
        imgPanel.add(cornerLabel);

        pane.add(imgPanel, BorderLayout.CENTER);
    }

    private void update() {
        dst = Mat.zeros(srcGray.size(), CvType.CV_32F);

        /// Detector parameters
        int blockSize = 2;
        int apertureSize = 3;
        double k = 0.04;

        /// Detecting corners
        Imgproc.cornerHarris(srcGray, dst, blockSize, apertureSize, k);

        /// Normalizing
        Core.normalize(dst, dstNorm, 0, 255, Core.NORM_MINMAX);
        Core.convertScaleAbs(dstNorm, dstNormScaled);

        /// Drawing a circle around corners
        float[] dstNormData = new float[(int) (dstNorm.total() * dstNorm.channels())];
        dstNorm.get(0, 0, dstNormData);

        for (int i = 0; i < dstNorm.rows(); i++) {
            for (int j = 0; j < dstNorm.cols(); j++) {
                if ((int) dstNormData[i * dstNorm.cols() + j] > threshold) {
                    Imgproc.circle(dstNormScaled, new Point(j, i), 5, new Scalar(0), 2, 8, 0);
                }
            }
        }

        cornerLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(dstNormScaled)));
        frame.repaint();
    }
}

public class CornerHarrisDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Schedule a job for the event dispatch thread:
        // creating and showing this application's GUI.
        javax.swing.SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                new CornerHarris(args);
            }
        });
    }
}
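Editor's note: this PR's diff does not show the Python counterpart of the Harris demo, so here is a minimal Python sketch of the same pipeline (placeholder path, fixed threshold instead of the slider):

```python
import numpy as np
import cv2 as cv

src = cv.imread('building.jpg')  # placeholder path
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Same detector parameters as the Java demo: blockSize=2, apertureSize=3, k=0.04.
dst = cv.cornerHarris(gray, 2, 3, 0.04)

# Normalize the response to [0, 255] for thresholding and display.
dst_norm = cv.normalize(dst, None, 0, 255, cv.NORM_MINMAX)
dst_norm_scaled = cv.convertScaleAbs(dst_norm)

# Circle every pixel whose normalized response exceeds the threshold.
threshold = 200
for i, j in zip(*np.where(dst_norm > threshold)):
    cv.circle(dst_norm_scaled, (int(j), int(i)), 5, 0, 2)

cv.imshow('Corners detected', dst_norm_scaled)
cv.waitKey()
```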
@@ -0,0 +1,56 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.xfeatures2d.SURF;

class SURFMatching {
    public void run(String[] args) {
        String filename1 = args.length > 1 ? args[0] : "../data/box.png";
        String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
        Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
        Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
        if (img1.empty() || img2.empty()) {
            System.err.println("Cannot read images!");
            System.exit(0);
        }

        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
        double hessianThreshold = 400;
        int nOctaves = 4, nOctaveLayers = 3;
        boolean extended = false, upright = false;
        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
        MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
        Mat descriptors1 = new Mat(), descriptors2 = new Mat();
        detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
        detector.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);

        //-- Step 2: Matching descriptor vectors with a brute force matcher
        // Since SURF is a floating-point descriptor NORM_L2 is used
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(descriptors1, descriptors2, matches);

        //-- Draw matches
        Mat imgMatches = new Mat();
        Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);

        HighGui.imshow("Matches", imgMatches);
        HighGui.waitKey(0);

        System.exit(0);
    }
}

public class SURFMatchingDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        new SURFMatching().run(args);
    }
}
@@ -0,0 +1,44 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.xfeatures2d.SURF;

class SURFDetection {
    public void run(String[] args) {
        String filename = args.length > 0 ? args[0] : "../data/box.png";
        Mat src = Imgcodecs.imread(filename, Imgcodecs.IMREAD_GRAYSCALE);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        //-- Step 1: Detect the keypoints using SURF Detector
        double hessianThreshold = 400;
        int nOctaves = 4, nOctaveLayers = 3;
        boolean extended = false, upright = false;
        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
        MatOfKeyPoint keypoints = new MatOfKeyPoint();
        detector.detect(src, keypoints);

        //-- Draw keypoints
        Features2d.drawKeypoints(src, keypoints, src);

        //-- Show detected (drawn) keypoints
        HighGui.imshow("SURF Keypoints", src);
        HighGui.waitKey(0);

        System.exit(0);
    }
}

public class SURFDetectionDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        new SURFDetection().run(args);
    }
}
@@ -0,0 +1,78 @@
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.DMatch;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.xfeatures2d.SURF;

class SURFFLANNMatching {
    public void run(String[] args) {
        String filename1 = args.length > 1 ? args[0] : "../data/box.png";
        String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
        Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
        Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
        if (img1.empty() || img2.empty()) {
            System.err.println("Cannot read images!");
            System.exit(0);
        }

        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
        double hessianThreshold = 400;
        int nOctaves = 4, nOctaveLayers = 3;
        boolean extended = false, upright = false;
        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
        MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
        Mat descriptors1 = new Mat(), descriptors2 = new Mat();
        detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
        detector.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);

        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
        // Since SURF is a floating-point descriptor NORM_L2 is used
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        List<MatOfDMatch> knnMatches = new ArrayList<>();
        matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);

        //-- Filter matches using the Lowe's ratio test
        float ratio_thresh = 0.7f;
        List<DMatch> listOfGoodMatches = new ArrayList<>();
        for (int i = 0; i < knnMatches.size(); i++) {
            if (knnMatches.get(i).rows() > 1) {
                DMatch[] matches = knnMatches.get(i).toArray();
                if (matches[0].distance / matches[1].distance <= ratio_thresh) {
                    listOfGoodMatches.add(matches[0]);
                }
            }
        }
        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(listOfGoodMatches);

        //-- Draw matches
        Mat imgMatches = new Mat();
        Features2d.drawMatches(img1, keypoints1, img2, keypoints2, goodMatches, imgMatches, Scalar.all(-1),
                Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);

        //-- Show detected matches
        HighGui.imshow("Good Matches", imgMatches);
        HighGui.waitKey(0);

        System.exit(0);
    }
}

public class SURFFLANNMatchingDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        new SURFFLANNMatching().run(args);
    }
}
@@ -0,0 +1,130 @@
import java.util.ArrayList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.xfeatures2d.SURF;

class SURFFLANNMatchingHomography {
    public void run(String[] args) {
        String filenameObject = args.length > 1 ? args[0] : "../data/box.png";
        String filenameScene = args.length > 1 ? args[1] : "../data/box_in_scene.png";
        Mat imgObject = Imgcodecs.imread(filenameObject, Imgcodecs.IMREAD_GRAYSCALE);
        Mat imgScene = Imgcodecs.imread(filenameScene, Imgcodecs.IMREAD_GRAYSCALE);
        if (imgObject.empty() || imgScene.empty()) {
            System.err.println("Cannot read images!");
            System.exit(0);
        }

        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
        double hessianThreshold = 400;
        int nOctaves = 4, nOctaveLayers = 3;
        boolean extended = false, upright = false;
        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
        MatOfKeyPoint keypointsObject = new MatOfKeyPoint(), keypointsScene = new MatOfKeyPoint();
        Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
        detector.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
        detector.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);

        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
        // Since SURF is a floating-point descriptor NORM_L2 is used
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        List<MatOfDMatch> knnMatches = new ArrayList<>();
        matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);

        //-- Filter matches using the Lowe's ratio test
        float ratio_thresh = 0.75f;
        List<DMatch> listOfGoodMatches = new ArrayList<>();
        for (int i = 0; i < knnMatches.size(); i++) {
            if (knnMatches.get(i).rows() > 1) {
                DMatch[] matches = knnMatches.get(i).toArray();
                if (matches[0].distance / matches[1].distance <= ratio_thresh) {
                    listOfGoodMatches.add(matches[0]);
                }
            }
        }
        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(listOfGoodMatches);

        //-- Draw matches
        Mat imgMatches = new Mat();
        Features2d.drawMatches(imgObject, keypointsObject, imgScene, keypointsScene, goodMatches, imgMatches, Scalar.all(-1),
                Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);

        //-- Localize the object
        List<Point> obj = new ArrayList<>();
        List<Point> scene = new ArrayList<>();

        List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
        List<KeyPoint> listOfKeypointsScene = keypointsScene.toList();
        for (int i = 0; i < listOfGoodMatches.size(); i++) {
            //-- Get the keypoints from the good matches
            obj.add(listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt);
            scene.add(listOfKeypointsScene.get(listOfGoodMatches.get(i).trainIdx).pt);
        }

        MatOfPoint2f objMat = new MatOfPoint2f(), sceneMat = new MatOfPoint2f();
        objMat.fromList(obj);
        sceneMat.fromList(scene);
        double ransacReprojThreshold = 3.0;
        Mat H = Calib3d.findHomography( objMat, sceneMat, Calib3d.RANSAC, ransacReprojThreshold );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
        float[] objCornersData = new float[(int) (objCorners.total() * objCorners.channels())];
        objCorners.get(0, 0, objCornersData);
        objCornersData[0] = 0;
        objCornersData[1] = 0;
        objCornersData[2] = imgObject.cols();
        objCornersData[3] = 0;
        objCornersData[4] = imgObject.cols();
        objCornersData[5] = imgObject.rows();
        objCornersData[6] = 0;
        objCornersData[7] = imgObject.rows();
        objCorners.put(0, 0, objCornersData);

        Core.perspectiveTransform(objCorners, sceneCorners, H);
        float[] sceneCornersData = new float[(int) (sceneCorners.total() * sceneCorners.channels())];
        sceneCorners.get(0, 0, sceneCornersData);

        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        Imgproc.line(imgMatches, new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]),
                new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]),
                new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]),
                new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]),
                new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]), new Scalar(0, 255, 0), 4);

        //-- Show detected matches
        HighGui.imshow("Good Matches & Object detection", imgMatches);
        HighGui.waitKey(0);

        System.exit(0);
    }
}

public class SURFFLANNMatchingHomographyDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        new SURFFLANNMatchingHomography().run(args);
    }
}
@ -0,0 +1,70 @@ |
|||||||
|
from __future__ import print_function |
||||||
|
import cv2 as cv |
||||||
|
import numpy as np |
||||||
|
import argparse |
||||||
|
import random as rng |
||||||
|
|
||||||
|
source_window = 'Image' |
||||||
|
maxTrackbar = 25 |
||||||
|
rng.seed(12345) |

def goodFeaturesToTrack_Demo(val):
    maxCorners = max(val, 1)

    # Parameters for Shi-Tomasi algorithm
    qualityLevel = 0.01
    minDistance = 10
    blockSize = 3
    gradientSize = 3
    useHarrisDetector = False
    k = 0.04

    # Copy the source image
    copy = np.copy(src)

    # Apply corner detection
    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None,
                                     blockSize=blockSize, gradientSize=gradientSize,
                                     useHarrisDetector=useHarrisDetector, k=k)

    # Draw corners detected
    print('** Number of corners detected:', corners.shape[0])
    radius = 4
    for i in range(corners.shape[0]):
        # cv.circle expects integer pixel coordinates; the detected corners are float32
        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius,
                  (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    # Show what you got
    cv.namedWindow(source_window)
    cv.imshow(source_window, copy)

    # Set the needed parameters to find the refined corners
    winSize = (5, 5)
    zeroZone = (-1, -1)
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)
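    # Refinement stops after 40 iterations or once a corner moves by less than
    # 0.001 pixels between iterations, whichever happens first.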

    # Calculate the refined corner locations
    corners = cv.cornerSubPix(src_gray, corners, winSize, zeroZone, criteria)

    # Write them down
    for i in range(corners.shape[0]):
        print(" -- Refined Corner [", i, "] (", corners[i,0,0], ",", corners[i,0,1], ")")

# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 10 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
goodFeaturesToTrack_Demo(maxCorners)

cv.waitKey()
@ -0,0 +1,80 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

myHarris_window = 'My Harris corner detector'
myShiTomasi_window = 'My Shi Tomasi corner detector'
myHarris_qualityLevel = 50
myShiTomasi_qualityLevel = 50
max_qualityLevel = 100
rng.seed(12345)

def myHarris_function(val):
    myHarris_copy = np.copy(src)
    myHarris_qualityLevel = max(val, 1)

    for i in range(src_gray.shape[0]):
        for j in range(src_gray.shape[1]):
            if Mc[i,j] > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel:
                cv.circle(myHarris_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    cv.imshow(myHarris_window, myHarris_copy)

def myShiTomasi_function(val):
    myShiTomasi_copy = np.copy(src)
    myShiTomasi_qualityLevel = max(val, 1)

    for i in range(src_gray.shape[0]):
        for j in range(src_gray.shape[1]):
            if myShiTomasi_dst[i,j] > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel:
                cv.circle(myShiTomasi_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    cv.imshow(myShiTomasi_window, myShiTomasi_copy)
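# Both callbacks mark every pixel whose response exceeds a threshold interpolated
# between the image's minimum and maximum response by the trackbar value:
# thresh = minVal + (maxVal - minVal) * qualityLevel / max_qualityLevel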

# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Set some parameters
blockSize = 3
apertureSize = 3

# My Harris matrix -- Using cornerEigenValsAndVecs
myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize)

# Calculate Mc, the Harris response built from the two eigenvalues:
# Mc = lambda_1 * lambda_2 - k * (lambda_1 + lambda_2)^2 with k = 0.04
Mc = np.empty(src_gray.shape, dtype=np.float32)
for i in range(src_gray.shape[0]):
    for j in range(src_gray.shape[1]):
        lambda_1 = myHarris_dst[i,j,0]
        lambda_2 = myHarris_dst[i,j,1]
        Mc[i,j] = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 )
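# An equivalent vectorized form, assuming the (lambda_1, lambda_2, x1, y1, x2, y2)
# channel layout of cornerEigenValsAndVecs:
# Mc = myHarris_dst[:,:,0]*myHarris_dst[:,:,1] - 0.04*(myHarris_dst[:,:,0] + myHarris_dst[:,:,1])**2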

myHarris_minVal, myHarris_maxVal, _, _ = cv.minMaxLoc(Mc)

# Create Window and Trackbar
cv.namedWindow(myHarris_window)
cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function)
myHarris_function(myHarris_qualityLevel)

# My Shi-Tomasi -- Using cornerMinEigenVal
myShiTomasi_dst = cv.cornerMinEigenVal(src_gray, blockSize, apertureSize)

myShiTomasi_minVal, myShiTomasi_maxVal, _, _ = cv.minMaxLoc(myShiTomasi_dst)

# Create Window and Trackbar
cv.namedWindow(myShiTomasi_window)
cv.createTrackbar('Quality Level:', myShiTomasi_window, myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function)
myShiTomasi_function(myShiTomasi_qualityLevel)

cv.waitKey()
@ -0,0 +1,58 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng

source_window = 'Image'
maxTrackbar = 100
rng.seed(12345)

def goodFeaturesToTrack_Demo(val):
    maxCorners = max(val, 1)

    # Parameters for Shi-Tomasi algorithm
    qualityLevel = 0.01
    minDistance = 10
    blockSize = 3
    gradientSize = 3
    useHarrisDetector = False
    k = 0.04

    # Copy the source image
    copy = np.copy(src)

    # Apply corner detection
    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None,
                                     blockSize=blockSize, gradientSize=gradientSize,
                                     useHarrisDetector=useHarrisDetector, k=k)

    # Draw corners detected
    print('** Number of corners detected:', corners.shape[0])
    radius = 4
    for i in range(corners.shape[0]):
        # cv.circle expects integer pixel coordinates; the detected corners are float32
        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius,
                  (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
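    # qualityLevel = 0.01 keeps corners whose response is at least 1% of the strongest
    # response, and minDistance = 10 enforces a 10-pixel minimum spacing between corners.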

    # Show what you got
    cv.namedWindow(source_window)
    cv.imshow(source_window, copy)

# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 23 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
goodFeaturesToTrack_Demo(maxCorners)

cv.waitKey()
@ -0,0 +1,55 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

source_window = 'Source image'
corners_window = 'Corners detected'
max_thresh = 255

def cornerHarris_demo(val):
    thresh = val

    # Detector parameters
    blockSize = 2
    apertureSize = 3
    k = 0.04

    # Detecting corners
    dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k)

    # Normalizing
    dst_norm = np.empty(dst.shape, dtype=np.float32)
    cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
    dst_norm_scaled = cv.convertScaleAbs(dst_norm)
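    # The raw Harris response has an arbitrary range, so it is first stretched to
    # [0, 255]; convertScaleAbs then converts it to 8-bit so it can be displayed.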

    # Drawing a circle around corners
    for i in range(dst_norm.shape[0]):
        for j in range(dst_norm.shape[1]):
            if int(dst_norm[i,j]) > thresh:
                cv.circle(dst_norm_scaled, (j,i), 5, (0), 2)

    # Showing the result
    cv.namedWindow(corners_window)
    cv.imshow(corners_window, dst_norm_scaled)

# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
thresh = 200 # initial threshold
cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo)
cv.imshow(source_window, src)
cornerHarris_demo(thresh)

cv.waitKey()
@ -0,0 +1,35 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a brute force matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2)
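# Note: SURF lives in the opencv_contrib xfeatures2d module, so this script needs
# an OpenCV build with the contrib modules (and, in recent versions, the non-free
# algorithms enabled) for cv.xfeatures2d_SURF to be available.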

#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches)

#-- Show detected matches
cv.imshow('Matches', img_matches)

cv.waitKey()
@ -0,0 +1,27 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/box.png')
args = parser.parse_args()

src = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints = detector.detect(src)
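# hessianThreshold filters keypoints by the Hessian response at their location;
# raising minHessian keeps fewer, stronger keypoints.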

#-- Draw keypoints
img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.drawKeypoints(src, keypoints, img_keypoints)

#-- Show detected (drawn) keypoints
cv.imshow('SURF Keypoints', img_keypoints)

cv.waitKey()
@ -0,0 +1,43 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)

#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for matches in knn_matches:
    if len(matches) > 1:
        if matches[0].distance / matches[1].distance <= ratio_thresh:
            good_matches.append(matches[0])
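# A match is kept only when the best candidate is clearly better (here, at most
# 0.7 times the distance) than the second-best one; ambiguous matches are dropped.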

#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Show detected matches
cv.imshow('Good Matches', img_matches)

cv.waitKey()
@ -0,0 +1,78 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for matches in knn_matches:
    if len(matches) > 1:
        if matches[0].distance / matches[1].distance <= ratio_thresh:
            good_matches.append(matches[0])

#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
    #-- Get the keypoints from the good matches
    obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
    obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
    scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
    scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]

H, _ = cv.findHomography(obj, scene, cv.RANSAC)
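# queryIdx indexes the object keypoints and trainIdx the scene keypoints, matching
# the (query, train) argument order of knnMatch; RANSAC discards outlier pairs
# while estimating the 3x3 homography H.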

#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = img_object.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = img_object.shape[1]
obj_corners[2,0,1] = img_object.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = img_object.shape[0]

scene_corners = cv.perspectiveTransform(obj_corners, H)
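# perspectiveTransform maps each corner (x, y) through H with the projective
# division: (x', y') = ((h00*x + h01*y + h02) / w, (h10*x + h11*y + h12) / w),
# where w = h20*x + h21*y + h22.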

#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),
    (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),
    (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),
    (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),
    (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)

#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)

cv.waitKey()