Merge pull request #3712 from kaingwade:clean_haarcascades_jsbindings
Fix broken js build after moving HaarCascades to contrib
pull/3729/head
commit fe06856b0c
2 changed files with 242 additions and 0 deletions
@@ -0,0 +1,100 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Example</h2>
<p>
    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
    You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
    <td>
        <canvas id="canvasInput"></canvas>
    </td>
    <td>
        <canvas id="canvasOutput"></canvas>
    </td>
</tr>
<tr>
    <td>
        <div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
    </td>
    <td>
        <div class="caption">canvasOutput</div>
    </td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
    let roiGray = gray.roi(faces.get(i));
    let roiSrc = src.roi(faces.get(i));
    let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
    let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
                              faces.get(i).y + faces.get(i).height);
    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
    // detect eyes in face ROI
    eyeCascade.detectMultiScale(roiGray, eyes);
    for (let j = 0; j < eyes.size(); ++j) {
        let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
        let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
                                  eyes.get(j).y + eyes.get(j).height);
        cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
    }
    roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');

let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});

utils.loadOpenCv(() => {
    let eyeCascadeFile = 'haarcascade_eye.xml';
    utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
        let faceCascadeFile = 'haarcascade_frontalface_default.xml';
        utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
            tryIt.removeAttribute('disabled');
        });
    });
});
</script>
</body>
</html>
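
Note on the classifier loading above: cv.CascadeClassifier.load() resolves 'haarcascade_frontalface_default.xml' and 'haarcascade_eye.xml' against Emscripten's in-memory filesystem rather than fetching them over HTTP, which is why the page first pulls both files in with utils.createFileFromUrl and only then enables the Try it button. The sketch below shows roughly what such a helper has to do; the name fetchCascade and the error handling are illustrative assumptions rather than the actual utils.js code, but the essential step is writing the downloaded bytes with cv.FS_createDataFile so the classifier can open the file by name.

// Illustrative sketch (hypothetical helper; the bundled utils.js may differ):
// download a cascade XML and write it into the Emscripten virtual filesystem
// so cv.CascadeClassifier.load(filename) can find it by name.
function fetchCascade(url, filename, onDone) {
    let request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = () => {
        if (request.status === 200) {
            // cv.FS_createDataFile(parentPath, name, data, canRead, canWrite, canOwn)
            cv.FS_createDataFile('/', filename, new Uint8Array(request.response), true, false, false);
            onDone();
        } else {
            console.error('Failed to fetch ' + url + ' (HTTP ' + request.status + ')');
        }
    };
    request.send();
}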
@@ -0,0 +1,142 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Camera Example</h2>
<p>
    Click the <b>Start/Stop</b> button to start or stop the camera capture.<br>
    The <b>videoInput</b> is a &lt;video&gt; element used as face detector input.
    The <b>canvasOutput</b> is a &lt;canvas&gt; element used as face detector output.<br>
    The code in the &lt;textarea&gt; will be executed when the video is started.
    You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
    <td>
        <video id="videoInput" width=320 height=240></video>
    </td>
    <td>
        <canvas id="canvasOutput" width=320 height=240></canvas>
    </td>
    <td></td>
    <td></td>
</tr>
<tr>
    <td>
        <div class="caption">videoInput</div>
    </td>
    <td>
        <div class="caption">canvasOutput</div>
    </td>
    <td></td>
    <td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();

// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            src.delete();
            dst.delete();
            gray.delete();
            faces.delete();
            classifier.delete();
            return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(src);
        src.copyTo(dst);
        cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
        // detect faces.
        classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
        // draw faces.
        for (let i = 0; i < faces.size(); ++i) {
            let face = faces.get(i);
            let point1 = new cv.Point(face.x, face.y);
            let point2 = new cv.Point(face.x + face.width, face.y + face.height);
            cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
        }
        cv.imshow('canvasOutput', dst);
        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
}

// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');

let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');

startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        utils.startCamera('qvga', onVideoStarted, 'videoInput');
    } else {
        utils.stopCamera();
        onVideoStopped();
    }
});

function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.width = videoInput.videoWidth;
    videoInput.height = videoInput.videoHeight;
    utils.executeCode('codeEditor');
}

function onVideoStopped() {
    streaming = false;
    canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
    startAndStop.innerText = 'Start';
}

utils.loadOpenCv(() => {
    let faceCascadeFile = 'haarcascade_frontalface_default.xml';
    utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
        startAndStop.removeAttribute('disabled');
    });
});
</script>
</body>
</html>
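
One parameter set worth calling out in both snippets is the detectMultiScale call: the examples use a scale factor of 1.1 and minNeighbors of 3, and the still-image example passes an empty cv.Size for minSize/maxSize, i.e. no size constraint. If the camera loop runs slowly or picks up spurious tiny detections, a common tweak is to supply a real minimum window size; the variant below is an illustrative suggestion with guessed values, not part of this PR.

// Illustrative variant of the camera snippet's detectMultiScale call, using the same
// binding signature as the image example above:
// (image, objects, scaleFactor, minNeighbors, flags, minSize, maxSize).
let minSize = new cv.Size(60, 60);  // skip candidate windows smaller than 60x60 px (value is a guess to tune)
let maxSize = new cv.Size(0, 0);    // (0, 0) leaves the upper size bound unconstrained
classifier.detectMultiScale(gray, faces, 1.1, 3, 0, minSize, maxSize);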