Commit 9a54b06c authored by vincent's avatar vincent

fixed examples

parent df5ac5da
// Resizes the overlay canvas to match the media's dimensions and scales the
// detection results to the same size, so drawn overlays line up with the
// rendered image or video frame.
function resizeCanvasAndResults(dimensions, canvas, results) {
  // A video element's intrinsic media size must be queried via faceapi;
  // anything else is assumed to already be a { width, height } object.
  const mediaDims = dimensions instanceof HTMLVideoElement
    ? faceapi.getMediaDimensions(dimensions)
    : dimensions;
  canvas.width = mediaDims.width;
  canvas.height = mediaDims.height;
  // Scale detections (and landmarks) in case the displayed image is smaller
  // than the original size.
  return faceapi.resizeResults(results, {
    width: mediaDims.width,
    height: mediaDims.height,
  });
}
// Resizes the overlay canvas and renders the given face detections onto it.
function drawDetections(dimensions, canvas, detections) {
  const scaledDetections = resizeCanvasAndResults(dimensions, canvas, detections);
  faceapi.drawDetection(canvas, scaledDetections);
}
// Resizes the overlay canvas and draws face landmarks for the given results.
// When `withBoxes` is true (the default), the detection bounding boxes are
// drawn as well.
function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
  const scaledResults = resizeCanvasAndResults(dimensions, canvas, results);

  if (withBoxes) {
    const boxes = scaledResults.map((res) => res.detection);
    faceapi.drawDetection(canvas, boxes);
  }

  const allLandmarks = scaledResults.map((res) => res.landmarks);
  faceapi.drawLandmarks(canvas, allLandmarks, {
    lineWidth: 2,
    drawLines: true,
    color: 'green',
  });
}
// Resizes the overlay canvas and draws the recognized face expressions for
// each result, optionally with detection boxes (score labels suppressed).
// NOTE(review): `thresh` is accepted but never used in this body — presumably
// intended as a minimum-confidence filter; confirm against callers.
function drawExpressions(dimensions, canvas, results, thresh, withBoxes = true) {
  const scaledResults = resizeCanvasAndResults(dimensions, canvas, results);

  if (withBoxes) {
    const boxes = scaledResults.map((res) => res.detection);
    faceapi.drawDetection(canvas, boxes, { withScore: false });
  }

  const expressionsToDraw = scaledResults.map(({ detection, expressions }) => ({
    position: detection.box,
    expressions,
  }));
  faceapi.drawFaceExpressions(canvas, expressionsToDraw);
}
\ No newline at end of file
......@@ -65,7 +65,7 @@
function drawLandmarkCanvas(img, landmarks) {
const canvas = faceapi.createCanvasFromMedia(img)
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: 2 , drawLines: true })
new faceapi.draw.DrawFaceLandmarks(landmarks).draw(canvas)
}
async function runLandmarkDetection(useBatchInput) {
......
......@@ -68,13 +68,10 @@
$('#faceContainer').append(canvas)
const x = 20, y = canvas.height - 20
faceapi.drawText(
canvas.getContext('2d'),
x,
y,
faceMatcher.findBestMatch(descriptor).toString(),
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
const ctx = faceapi.getContext2dOrThrow(canvas)
ctx.font = '16px Georgia'
ctx.fillStyle = 'red'
ctx.fillText(faceMatcher.findBestMatch(descriptor).toString(), x, y)
}
async function runComputeFaceDescriptors(useBatchInput) {
......
......@@ -43,12 +43,12 @@
const canvas = faceapi.createCanvasFromMedia(currentImg)
$('#faceContainer').empty()
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: drawLines ? 2 : 4, drawLines })
new faceapi.draw.DrawFaceLandmarks(landmarks, { drawLines }).draw(canvas)
}
async function onSelectionChanged(uri) {
currentImg = await faceapi.fetchImage(uri)
landmarks = await faceapi.detectLandmarks(currentImg)
landmarks = await faceapi.detectFaceLandmarks(currentImg)
redraw()
}
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<script src="js/bbt.js"></script>
......@@ -159,17 +158,19 @@
function drawFaceRecognitionResults(results) {
const canvas = $('#overlay').get(0)
const inputImgEl = $('#inputImg').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults($('#inputImg').get(0), canvas, results)
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -148,7 +147,9 @@
const results = await faceapi.detectAllFaces(inputImgEl, options)
drawDetections(inputImgEl, $('#overlay').get(0), results)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, inputImgEl))
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -150,7 +149,14 @@
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceExpressions()
drawExpressions(inputImgEl, $('#overlay').get(0), results, thresh, true)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
const minConfidence = 0.05
faceapi.draw.drawDetections(canvas, resizedResults)
faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
}
async function run() {
......
......@@ -150,9 +150,7 @@
function displayExtractedFaces(faceImages) {
const canvas = $('#overlay').get(0)
const { width, height } = $('#inputImg').get(0)
canvas.width = width
canvas.height = height
faceapi.matchDimensions(canvas, $('#inputImg').get(0))
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -162,7 +161,14 @@
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceLandmarks()
drawLandmarks(inputImgEl, $('#overlay').get(0), results, withBoxes)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -205,11 +204,11 @@
}
async function updateReferenceImageResults() {
const imgEl = $('#refImg').get(0)
const inputImgEl = $('#refImg').get(0)
const canvas = $('#refImgOverlay').get(0)
const fullFaceDescriptions = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
......@@ -221,16 +220,19 @@
// from the detection results for the reference image
faceMatcher = new faceapi.FaceMatcher(fullFaceDescriptions)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, fullFaceDescriptions)
const resizedResults = faceapi.resizeResults(fullFaceDescriptions, inputImgEl)
// draw boxes with the corresponding label as text
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const boxesWithText = resizedResults
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
faceapi.drawDetection(canvas, boxesWithText)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateQueryImageResults() {
......@@ -238,27 +240,25 @@
return
}
const imgEl = $('#queryImg').get(0)
const inputImgEl = $('#queryImg').get(0)
const canvas = $('#queryImgOverlay').get(0)
const results = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, results)
// draw boxes with the corresponding label as text
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
// match each face descriptor to the reference descriptor
// with lowest euclidean distance and display the result as text
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateResults() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -169,18 +168,25 @@
const ts = Date.now()
const faceDetectionTask = faceapi.detectAllFaces(videoEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const drawBoxes = withBoxes
const drawLandmarks = withFaceLandmarks
let task = faceapi.detectAllFaces(videoEl, options)
task = withFaceLandmarks ? task.withFaceLandmarks() : task
const results = await task
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
drawFunction(videoEl, $('#overlay').get(0), results, withBoxes)
const resizedResults = faceapi.resizeResults(results, dims)
if (drawBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
if (drawLandmarks) {
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
setTimeout(() => onPlay(videoEl))
}
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -159,7 +158,9 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawDetections(videoEl, $('#overlay').get(0), [result])
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, videoEl)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, videoEl))
}
setTimeout(() => onPlay())
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -171,7 +170,15 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawExpressions(videoEl, $('#overlay').get(0), [result], withBoxes)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, videoEl)
const minConfidence = 0.05
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
}
setTimeout(() => onPlay())
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......
......@@ -10,7 +10,7 @@ async function run() {
const detections = await faceapi.detectAllFaces(img, faceDetectionOptions)
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, detections)
faceapi.draw.drawDetections(out, detections)
saveFile('faceDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceDetection.jpg')
......
......@@ -12,8 +12,8 @@ async function run() {
.withFaceExpressions()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection), { withScore: false })
faceapi.drawFaceExpressions(out, results.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceExpressions(out, results)
saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceExpressionRecognition.jpg')
......
......@@ -12,8 +12,8 @@ async function run() {
.withFaceLandmarks()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection))
faceapi.drawLandmarks(out, results.map(res => res.landmarks), { drawLines: true, color: 'red' })
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks))
saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceLandmarkDetection.jpg')
......
......@@ -26,20 +26,21 @@ async function run() {
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const refBoxesWithText = resultsRef
const refDrawBoxes = resultsRef
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
const outRef = faceapi.createCanvasFromMedia(referenceImage) as any
faceapi.drawDetection(outRef, refBoxesWithText)
saveFile('referenceImage.jpg', outRef.toBuffer('image/jpeg'))
.map((box, i) => new faceapi.draw.DrawBox(box, { label: labels[i] }))
const outRef = faceapi.createCanvasFromMedia(referenceImage)
refDrawBoxes.forEach(drawBox => drawBox.draw(outRef))
const queryBoxesWithText = resultsQuery.map(res => {
saveFile('referenceImage.jpg', (outRef as any).toBuffer('image/jpeg'))
const queryDrawBoxes = resultsQuery.map(res => {
const bestMatch = faceMatcher.findBestMatch(res.descriptor)
return new faceapi.BoxWithText(res.detection.box, bestMatch.toString())
return new faceapi.draw.DrawBox(res.detection.box, { label: bestMatch.toString() })
})
const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
faceapi.drawDetection(outQuery, queryBoxesWithText)
saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
const outQuery = faceapi.createCanvasFromMedia(queryImage)
queryDrawBoxes.forEach(drawBox => drawBox.draw(outQuery))
saveFile('queryImage.jpg', (outQuery as any).toBuffer('image/jpeg'))
console.log('done, saved results to out/queryImage.jpg')
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment