mirror of https://github.com/opencv/opencv.git
parent
4dc7e617a4
commit
41b95cae38
15 changed files with 1145 additions and 184 deletions
@@ -0,0 +1,144 @@
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

// This program demonstrates how to use OpenCV PCA to extract the orientation of an object.
class IntroductionToPCA {
    private void drawAxis(Mat img, Point p_, Point q_, Scalar colour, float scale) {
        Point p = new Point(p_.x, p_.y);
        Point q = new Point(q_.x, q_.y);
        //! [visualization1]
        double angle = Math.atan2(p.y - q.y, p.x - q.x); // angle in radians
        double hypotenuse = Math.sqrt((p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));

        // Here we lengthen the arrow by a factor of scale
        q.x = (int) (p.x - scale * hypotenuse * Math.cos(angle));
        q.y = (int) (p.y - scale * hypotenuse * Math.sin(angle));
        Imgproc.line(img, p, q, colour, 1, Core.LINE_AA, 0);

        // create the arrow hooks
        p.x = (int) (q.x + 9 * Math.cos(angle + Math.PI / 4));
        p.y = (int) (q.y + 9 * Math.sin(angle + Math.PI / 4));
        Imgproc.line(img, p, q, colour, 1, Core.LINE_AA, 0);

        p.x = (int) (q.x + 9 * Math.cos(angle - Math.PI / 4));
        p.y = (int) (q.y + 9 * Math.sin(angle - Math.PI / 4));
        Imgproc.line(img, p, q, colour, 1, Core.LINE_AA, 0);
        //! [visualization1]
    }

    private double getOrientation(MatOfPoint ptsMat, Mat img) {
        List<Point> pts = ptsMat.toList();
        //! [pca]
        // Construct a buffer used by the pca analysis
        int sz = pts.size();
        Mat dataPts = new Mat(sz, 2, CvType.CV_64F);
        double[] dataPtsData = new double[(int) (dataPts.total() * dataPts.channels())];
        for (int i = 0; i < dataPts.rows(); i++) {
            dataPtsData[i * dataPts.cols()] = pts.get(i).x;
            dataPtsData[i * dataPts.cols() + 1] = pts.get(i).y;
        }
        dataPts.put(0, 0, dataPtsData);

        // Perform PCA analysis
        Mat mean = new Mat();
        Mat eigenvectors = new Mat();
        Mat eigenvalues = new Mat();
        Core.PCACompute2(dataPts, mean, eigenvectors, eigenvalues);
        double[] meanData = new double[(int) (mean.total() * mean.channels())];
        mean.get(0, 0, meanData);

        // Store the center of the object
        Point cntr = new Point(meanData[0], meanData[1]);

        // Store the eigenvalues and eigenvectors
        double[] eigenvectorsData = new double[(int) (eigenvectors.total() * eigenvectors.channels())];
        double[] eigenvaluesData = new double[(int) (eigenvalues.total() * eigenvalues.channels())];
        eigenvectors.get(0, 0, eigenvectorsData);
        eigenvalues.get(0, 0, eigenvaluesData);
        //! [pca]

        //! [visualization]
        // Draw the principal components
        Imgproc.circle(img, cntr, 3, new Scalar(255, 0, 255), 2);
        Point p1 = new Point(cntr.x + 0.02 * eigenvectorsData[0] * eigenvaluesData[0],
                cntr.y + 0.02 * eigenvectorsData[1] * eigenvaluesData[0]);
        Point p2 = new Point(cntr.x - 0.02 * eigenvectorsData[2] * eigenvaluesData[1],
                cntr.y - 0.02 * eigenvectorsData[3] * eigenvaluesData[1]);
        drawAxis(img, cntr, p1, new Scalar(0, 255, 0), 1);
        drawAxis(img, cntr, p2, new Scalar(255, 255, 0), 5);

        double angle = Math.atan2(eigenvectorsData[1], eigenvectorsData[0]); // orientation in radians
        //! [visualization]

        return angle;
    }

    public void run(String[] args) {
        //! [pre-process]
        // Load image
        String filename = args.length > 0 ? args[0] : "../data/pca_test1.jpg";
        Mat src = Imgcodecs.imread(filename);

        // Check if image is loaded successfully
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }

        Mat srcOriginal = src.clone();
        HighGui.imshow("src", srcOriginal);

        // Convert image to grayscale
        Mat gray = new Mat();
        Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);

        // Convert image to binary
        Mat bw = new Mat();
        Imgproc.threshold(gray, bw, 50, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
        //! [pre-process]

        //! [contours]
        // Find all the contours in the thresholded image
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(bw, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);

        for (int i = 0; i < contours.size(); i++) {
            // Calculate the area of each contour
            double area = Imgproc.contourArea(contours.get(i));
            // Ignore contours that are too small or too large
            if (area < 1e2 || 1e5 < area)
                continue;

            // Draw each contour only for visualisation purposes
            Imgproc.drawContours(src, contours, i, new Scalar(0, 0, 255), 2);
            // Find the orientation of each shape
            getOrientation(contours.get(i), src);
        }
        //! [contours]

        HighGui.imshow("output", src);

        HighGui.waitKey();
        System.exit(0);
    }
}

public class IntroductionToPCADemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        new IntroductionToPCA().run(args);
    }
}
@@ -0,0 +1,99 @@
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.ml.Ml;
import org.opencv.ml.SVM;

public class IntroductionToSVMDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Set up training data
        //! [setup1]
        int[] labels = { 1, -1, -1, -1 };
        float[] trainingData = { 501, 10, 255, 10, 501, 255, 10, 501 };
        //! [setup1]
        //! [setup2]
        Mat trainingDataMat = new Mat(4, 2, CvType.CV_32FC1);
        trainingDataMat.put(0, 0, trainingData);
        Mat labelsMat = new Mat(4, 1, CvType.CV_32SC1);
        labelsMat.put(0, 0, labels);
        //! [setup2]

        // Train the SVM
        //! [init]
        SVM svm = SVM.create();
        svm.setType(SVM.C_SVC);
        svm.setKernel(SVM.LINEAR);
        svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100, 1e-6));
        //! [init]
        //! [train]
        svm.train(trainingDataMat, Ml.ROW_SAMPLE, labelsMat);
        //! [train]

        // Data for visual representation
        int width = 512, height = 512;
        Mat image = Mat.zeros(height, width, CvType.CV_8UC3);

        // Show the decision regions given by the SVM
        //! [show]
        byte[] imageData = new byte[(int) (image.total() * image.channels())];
        Mat sampleMat = new Mat(1, 2, CvType.CV_32F);
        float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())];
        for (int i = 0; i < image.rows(); i++) {
            for (int j = 0; j < image.cols(); j++) {
                sampleMatData[0] = j;
                sampleMatData[1] = i;
                sampleMat.put(0, 0, sampleMatData);
                float response = svm.predict(sampleMat);

                if (response == 1) {
                    imageData[(i * image.cols() + j) * image.channels()] = 0;
                    imageData[(i * image.cols() + j) * image.channels() + 1] = (byte) 255;
                    imageData[(i * image.cols() + j) * image.channels() + 2] = 0;
                } else if (response == -1) {
                    imageData[(i * image.cols() + j) * image.channels()] = (byte) 255;
                    imageData[(i * image.cols() + j) * image.channels() + 1] = 0;
                    imageData[(i * image.cols() + j) * image.channels() + 2] = 0;
                }
            }
        }
        image.put(0, 0, imageData);
        //! [show]

        // Show the training data
        //! [show_data]
        int thickness = -1;
        int lineType = Core.LINE_8;
        Imgproc.circle(image, new Point(501, 10), 5, new Scalar(0, 0, 0), thickness, lineType, 0);
        Imgproc.circle(image, new Point(255, 10), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
        Imgproc.circle(image, new Point(501, 255), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
        Imgproc.circle(image, new Point(10, 501), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
        //! [show_data]

        // Show support vectors
        //! [show_vectors]
        thickness = 2;
        Mat sv = svm.getUncompressedSupportVectors();
        float[] svData = new float[(int) (sv.total() * sv.channels())];
        sv.get(0, 0, svData);
        for (int i = 0; i < sv.rows(); ++i) {
            Imgproc.circle(image, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6,
                    new Scalar(128, 128, 128), thickness, lineType, 0);
        }
        //! [show_vectors]

        Imgcodecs.imwrite("result.png", image); // save the image

        HighGui.imshow("SVM Simple Example", image); // show it to the user
        HighGui.waitKey();
        System.exit(0);
    }
}
@@ -0,0 +1,186 @@
import java.util.Random;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.ml.Ml;
import org.opencv.ml.SVM;

public class NonLinearSVMsDemo {
    public static final int NTRAINING_SAMPLES = 100;
    public static final float FRAC_LINEAR_SEP = 0.9f;

    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        System.out.println("\n--------------------------------------------------------------------------");
        System.out.println("This program shows Support Vector Machines for Non-Linearly Separable Data. ");
        System.out.println("--------------------------------------------------------------------------\n");

        // Data for visual representation
        int width = 512, height = 512;
        Mat I = Mat.zeros(height, width, CvType.CV_8UC3);

        // --------------------- 1. Set up training data randomly ---------------------------------------
        Mat trainData = new Mat(2 * NTRAINING_SAMPLES, 2, CvType.CV_32F);
        Mat labels = new Mat(2 * NTRAINING_SAMPLES, 1, CvType.CV_32S);

        Random rng = new Random(100); // Random value generation class

        // Set up the linearly separable part of the training data
        int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);

        //! [setup1]
        // Generate random points for the class 1
        Mat trainClass = trainData.rowRange(0, nLinearSamples);
        // The x coordinate of the points is in [0, 0.4)
        Mat c = trainClass.colRange(0, 1);
        float[] cData = new float[(int) (c.total() * c.channels())];
        double[] cDataDbl = rng.doubles(cData.length, 0, 0.4f * width).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);
        // The y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1, 2);
        cData = new float[(int) (c.total() * c.channels())];
        cDataDbl = rng.doubles(cData.length, 0, height).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);

        // Generate random points for the class 2
        trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES);
        // The x coordinate of the points is in [0.6, 1]
        c = trainClass.colRange(0, 1);
        cData = new float[(int) (c.total() * c.channels())];
        cDataDbl = rng.doubles(cData.length, 0.6 * width, width).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);
        // The y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1, 2);
        cData = new float[(int) (c.total() * c.channels())];
        cDataDbl = rng.doubles(cData.length, 0, height).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);
        //! [setup1]

        // ------------------ Set up the non-linearly separable part of the training data ---------------
        //! [setup2]
        // Generate random points for the classes 1 and 2
        trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples);
        // The x coordinate of the points is in [0.4, 0.6)
        c = trainClass.colRange(0, 1);
        cData = new float[(int) (c.total() * c.channels())];
        cDataDbl = rng.doubles(cData.length, 0.4 * width, 0.6 * width).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);
        // The y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1, 2);
        cData = new float[(int) (c.total() * c.channels())];
        cDataDbl = rng.doubles(cData.length, 0, height).toArray();
        for (int i = 0; i < cData.length; i++) {
            cData[i] = (float) cDataDbl[i];
        }
        c.put(0, 0, cData);
        //! [setup2]

        // ------------------------- Set up the labels for the classes ---------------------------------
        labels.rowRange(0, NTRAINING_SAMPLES).setTo(new Scalar(1)); // Class 1
        labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(new Scalar(2)); // Class 2

        // ------------------------ 2. Set up the support vector machines parameters --------------------
        System.out.println("Starting training process");
        //! [init]
        SVM svm = SVM.create();
        svm.setType(SVM.C_SVC);
        svm.setC(0.1);
        svm.setKernel(SVM.LINEAR);
        svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, (int) 1e7, 1e-6));
        //! [init]

        // ------------------------ 3. Train the svm ----------------------------------------------------
        //! [train]
        svm.train(trainData, Ml.ROW_SAMPLE, labels);
        //! [train]
        System.out.println("Finished training process");

        // ------------------------ 4. Show the decision regions ----------------------------------------
        //! [show]
        byte[] IData = new byte[(int) (I.total() * I.channels())];
        Mat sampleMat = new Mat(1, 2, CvType.CV_32F);
        float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())];
        for (int i = 0; i < I.rows(); i++) {
            for (int j = 0; j < I.cols(); j++) {
                sampleMatData[0] = j;
                sampleMatData[1] = i;
                sampleMat.put(0, 0, sampleMatData);
                float response = svm.predict(sampleMat);

                if (response == 1) {
                    IData[(i * I.cols() + j) * I.channels()] = 0;
                    IData[(i * I.cols() + j) * I.channels() + 1] = 100;
                    IData[(i * I.cols() + j) * I.channels() + 2] = 0;
                } else if (response == 2) {
                    IData[(i * I.cols() + j) * I.channels()] = 100;
                    IData[(i * I.cols() + j) * I.channels() + 1] = 0;
                    IData[(i * I.cols() + j) * I.channels() + 2] = 0;
                }
            }
        }
        I.put(0, 0, IData);
        //! [show]

        // ----------------------- 5. Show the training data --------------------------------------------
        //! [show_data]
        int thick = -1;
        int lineType = Core.LINE_8;
        float px, py;
        // Class 1
        float[] trainDataData = new float[(int) (trainData.total() * trainData.channels())];
        trainData.get(0, 0, trainDataData);
        for (int i = 0; i < NTRAINING_SAMPLES; i++) {
            px = trainDataData[i * trainData.cols()];
            py = trainDataData[i * trainData.cols() + 1];
            Imgproc.circle(I, new Point(px, py), 3, new Scalar(0, 255, 0), thick, lineType, 0);
        }
        // Class 2
        for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i) {
            px = trainDataData[i * trainData.cols()];
            py = trainDataData[i * trainData.cols() + 1];
            Imgproc.circle(I, new Point(px, py), 3, new Scalar(255, 0, 0), thick, lineType, 0);
        }
        //! [show_data]

        // ------------------------- 6. Show support vectors --------------------------------------------
        //! [show_vectors]
        thick = 2;
        Mat sv = svm.getUncompressedSupportVectors();
        float[] svData = new float[(int) (sv.total() * sv.channels())];
        sv.get(0, 0, svData);
        for (int i = 0; i < sv.rows(); i++) {
            Imgproc.circle(I, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, new Scalar(128, 128, 128),
                    thick, lineType, 0);
        }
        //! [show_vectors]

        Imgcodecs.imwrite("result.png", I); // save the Image
        HighGui.imshow("SVM for Non-Linear Training Data", I); // show it to the user
        HighGui.waitKey();
        System.exit(0);
    }
}
@@ -0,0 +1,100 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
from math import atan2, cos, sin, sqrt, pi

def drawAxis(img, p_, q_, colour, scale):
    p = list(p_)
    q = list(q_)
    ## [visualization1]
    angle = atan2(p[1] - q[1], p[0] - q[0])  # angle in radians
    hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))

    # Here we lengthen the arrow by a factor of scale
    q[0] = p[0] - scale * hypotenuse * cos(angle)
    q[1] = p[1] - scale * hypotenuse * sin(angle)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)

    # create the arrow hooks
    p[0] = q[0] + 9 * cos(angle + pi / 4)
    p[1] = q[1] + 9 * sin(angle + pi / 4)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)

    p[0] = q[0] + 9 * cos(angle - pi / 4)
    p[1] = q[1] + 9 * sin(angle - pi / 4)
    cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA)
    ## [visualization1]

def getOrientation(pts, img):
    ## [pca]
    # Construct a buffer used by the pca analysis
    sz = len(pts)
    data_pts = np.empty((sz, 2), dtype=np.float64)
    for i in range(data_pts.shape[0]):
        data_pts[i,0] = pts[i,0,0]
        data_pts[i,1] = pts[i,0,1]

    # Perform PCA analysis
    mean = np.empty((0))
    mean, eigenvectors, eigenvalues = cv.PCACompute2(data_pts, mean)

    # Store the center of the object
    cntr = (int(mean[0,0]), int(mean[0,1]))
    ## [pca]

    ## [visualization]
    # Draw the principal components
    cv.circle(img, cntr, 3, (255, 0, 255), 2)
    p1 = (cntr[0] + 0.02 * eigenvectors[0,0] * eigenvalues[0,0], cntr[1] + 0.02 * eigenvectors[0,1] * eigenvalues[0,0])
    p2 = (cntr[0] - 0.02 * eigenvectors[1,0] * eigenvalues[1,0], cntr[1] - 0.02 * eigenvectors[1,1] * eigenvalues[1,0])
    drawAxis(img, cntr, p1, (0, 255, 0), 1)
    drawAxis(img, cntr, p2, (255, 255, 0), 5)

    angle = atan2(eigenvectors[0,1], eigenvectors[0,0])  # orientation in radians
    ## [visualization]

    return angle

## [pre-process]
# Load image
parser = argparse.ArgumentParser(description='Code for Introduction to Principal Component Analysis (PCA) tutorial.\
    This program demonstrates how to use OpenCV PCA to extract the orientation of an object.')
parser.add_argument('--input', help='Path to input image.', default='../data/pca_test1.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
# Check if image is loaded successfully
if src is None:
    print('Could not open or find the image: ', args.input)
    exit(0)

cv.imshow('src', src)

# Convert image to grayscale
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Convert image to binary
_, bw = cv.threshold(gray, 50, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
## [pre-process]

## [contours]
# Find all the contours in the thresholded image
_, contours, _ = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)

for i, c in enumerate(contours):
    # Calculate the area of each contour
    area = cv.contourArea(c)
    # Ignore contours that are too small or too large
    if area < 1e2 or 1e5 < area:
        continue

    # Draw each contour only for visualisation purposes
    cv.drawContours(src, contours, i, (0, 0, 255), 2)
    # Find the orientation of each shape
    getOrientation(c, src)
## [contours]

cv.imshow('output', src)
cv.waitKey()
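
Note that getOrientation() in the Python sample above returns the orientation in radians, measured in image coordinates (y grows downward), but the script only draws it. Below is a minimal, untested sketch (an illustration, not part of this commit) of how the angle could also be printed per contour, assuming it is appended to the script above and reuses its contours, src, and size filter:

# Illustrative addition, not part of the committed sample:
# report each kept contour's orientation in degrees.
from math import degrees

for i, c in enumerate(contours):
    area = cv.contourArea(c)
    if area < 1e2 or 1e5 < area:
        continue  # same size filter as in the script above
    angle = getOrientation(c, src)  # radians, as returned above
    print('Contour %d: orientation %.1f degrees' % (i, degrees(angle)))
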
@@ -0,0 +1,62 @@
import cv2 as cv
import numpy as np

# Set up training data
## [setup1]
labels = np.array([1, -1, -1, -1])
trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
## [setup1]

# Train the SVM
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
## [init]
## [train]
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
## [train]

# Data for visual representation
width = 512
height = 512
image = np.zeros((height, width, 3), dtype=np.uint8)

# Show the decision regions given by the SVM
## [show]
green = (0,255,0)
blue = (255,0,0)
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        sampleMat = np.matrix([[j,i]], dtype=np.float32)
        response = svm.predict(sampleMat)[1]

        if response == 1:
            image[i,j] = green
        elif response == -1:
            image[i,j] = blue
## [show]

# Show the training data
## [show_data]
thickness = -1
cv.circle(image, (501,  10), 5, (  0,   0,   0), thickness)
cv.circle(image, (255,  10), 5, (255, 255, 255), thickness)
cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
cv.circle(image, ( 10, 501), 5, (255, 255, 255), thickness)
## [show_data]

# Show support vectors
## [show_vectors]
thickness = 2
sv = svm.getUncompressedSupportVectors()

for i in range(sv.shape[0]):
    cv.circle(image, (sv[i,0], sv[i,1]), 6, (128, 128, 128), thickness)
## [show_vectors]

cv.imwrite('result.png', image)  # save the image

cv.imshow('SVM Simple Example', image)  # show it to the user
cv.waitKey()
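
A trained model like the one above does not have to be rebuilt on every run. A hedged sketch of persisting and reloading it (an addition for illustration, not part of this commit; it assumes the ml module's save() method and cv.ml.SVM_load(), which exist in recent OpenCV 3.x Python bindings):

# Illustrative addition: persist the trained SVM and reuse it later.
svm.save('svm_intro.xml')               # serialize the model to XML/YAML
svm2 = cv.ml.SVM_load('svm_intro.xml')  # load it back as a ready-to-use SVM

sample = np.matrix([[10, 501]], dtype=np.float32)  # one of the training points
print(svm2.predict(sample)[1])                     # expected label: -1
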
@@ -0,0 +1,117 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import random as rng

NTRAINING_SAMPLES = 100  # Number of training samples per class
FRAC_LINEAR_SEP = 0.9    # Fraction of samples which compose the linear separable part

# Data for visual representation
WIDTH = 512
HEIGHT = 512
I = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8)

# --------------------- 1. Set up training data randomly ---------------------------------------
trainData = np.empty((2*NTRAINING_SAMPLES, 2), dtype=np.float32)
labels = np.empty((2*NTRAINING_SAMPLES, 1), dtype=np.int32)

rng.seed(100)  # Random value generation class

# Set up the linearly separable part of the training data
nLinearSamples = int(FRAC_LINEAR_SEP * NTRAINING_SAMPLES)

## [setup1]
# Generate random points for the class 1
trainClass = trainData[0:nLinearSamples,:]
# The x coordinate of the points is in [0, 0.4)
c = trainClass[:,0:1]
c[:] = np.random.uniform(0.0, 0.4 * WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:,1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)

# Generate random points for the class 2
trainClass = trainData[2*NTRAINING_SAMPLES-nLinearSamples:2*NTRAINING_SAMPLES,:]
# The x coordinate of the points is in [0.6, 1]
c = trainClass[:,0:1]
c[:] = np.random.uniform(0.6*WIDTH, WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:,1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
## [setup1]

#------------------ Set up the non-linearly separable part of the training data ---------------
## [setup2]
# Generate random points for the classes 1 and 2
trainClass = trainData[nLinearSamples:2*NTRAINING_SAMPLES-nLinearSamples,:]
# The x coordinate of the points is in [0.4, 0.6)
c = trainClass[:,0:1]
c[:] = np.random.uniform(0.4*WIDTH, 0.6*WIDTH, c.shape)
# The y coordinate of the points is in [0, 1)
c = trainClass[:,1:2]
c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
## [setup2]

#------------------------- Set up the labels for the classes ---------------------------------
labels[0:NTRAINING_SAMPLES,:] = 1  # Class 1
labels[NTRAINING_SAMPLES:2*NTRAINING_SAMPLES,:] = 2  # Class 2

#------------------------ 2. Set up the support vector machines parameters --------------------
print('Starting training process')
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setC(0.1)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, int(1e7), 1e-6))
## [init]

#------------------------ 3. Train the svm ----------------------------------------------------
## [train]
svm.train(trainData, cv.ml.ROW_SAMPLE, labels)
## [train]
print('Finished training process')

#------------------------ 4. Show the decision regions ----------------------------------------
## [show]
green = (0,100,0)
blue = (100,0,0)
for i in range(I.shape[0]):
    for j in range(I.shape[1]):
        sampleMat = np.matrix([[j,i]], dtype=np.float32)
        response = svm.predict(sampleMat)[1]

        if response == 1:
            I[i,j] = green
        elif response == 2:
            I[i,j] = blue
## [show]

#----------------------- 5. Show the training data --------------------------------------------
## [show_data]
thick = -1
# Class 1
for i in range(NTRAINING_SAMPLES):
    px = trainData[i,0]
    py = trainData[i,1]
    cv.circle(I, (px, py), 3, (0, 255, 0), thick)

# Class 2
for i in range(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES):
    px = trainData[i,0]
    py = trainData[i,1]
    cv.circle(I, (px, py), 3, (255, 0, 0), thick)
## [show_data]

#------------------------- 6. Show support vectors --------------------------------------------
## [show_vectors]
thick = 2
sv = svm.getUncompressedSupportVectors()

for i in range(sv.shape[0]):
    cv.circle(I, (sv[i,0], sv[i,1]), 6, (128, 128, 128), thick)
## [show_vectors]

cv.imwrite('result.png', I)  # save the Image
cv.imshow('SVM for Non-Linear Training Data', I)  # show it to the user
cv.waitKey()
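
Because a fraction of the generated samples deliberately overlaps in the [0.4, 0.6) band, the soft-margin linear SVM with C = 0.1 cannot classify every training point. A short illustrative check (an addition, not part of this commit) that runs predict() back over the training set and reports the training accuracy:

# Illustrative addition: training accuracy of the soft-margin linear SVM.
_, predicted = svm.predict(trainData)                     # one float label per row
accuracy = np.mean(predicted.astype(np.int32) == labels)  # compare against the int32 labels
print('Training accuracy: %.1f%%' % (100 * accuracy))
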