Commit 256ee654 authored by vincent

merge drawing-api into branch

parents e908bbd8 9ca0e1bf
sudo: required
language: node_js
node_js:
- "node"
#- "node"
- "11"
- "10"
- "8"
# node 6 is not compatible with tfjs-node
......
function resizeCanvasAndResults(dimensions, canvas, results) {
const { width, height } = dimensions instanceof HTMLVideoElement
? faceapi.getMediaDimensions(dimensions)
: dimensions
canvas.width = width
canvas.height = height
// resize detections (and landmarks) in case displayed image is smaller than
// original size
return faceapi.resizeResults(results, { width, height })
}
function drawDetections(dimensions, canvas, detections) {
const resizedDetections = resizeCanvasAndResults(dimensions, canvas, detections)
faceapi.drawDetection(canvas, resizedDetections)
}
function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection))
}
const faceLandmarks = resizedResults.map(det => det.landmarks)
const drawLandmarksOptions = {
lineWidth: 2,
drawLines: true,
color: 'green'
}
faceapi.drawLandmarks(canvas, faceLandmarks, drawLandmarksOptions)
}
function drawExpressions(dimensions, canvas, results, thresh, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection), { withScore: false })
}
faceapi.drawFaceExpressions(canvas, resizedResults.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
}
\ No newline at end of file
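The file deleted above (the examples' drawing.js helpers) is superseded by the faceapi.draw namespace introduced in this merge. A minimal sketch of the equivalent flow with the new API, pieced together from the replacement calls appearing throughout this diff (results is assumed to come from a prior detectAllFaces call):
// size the overlay canvas to the input media, then draw rescaled results
const canvas = document.getElementById('overlay')
const input = document.getElementById('inputImg')
faceapi.matchDimensions(canvas, input)
const resized = faceapi.resizeResults(results, input)
faceapi.draw.drawDetections(canvas, resized)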
@@ -65,7 +65,7 @@
function drawLandmarkCanvas(img, landmarks) {
const canvas = faceapi.createCanvasFromMedia(img)
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: 2 , drawLines: true })
new faceapi.draw.DrawFaceLandmarks(landmarks).draw(canvas)
}
async function runLandmarkDetection(useBatchInput) {
......
@@ -68,13 +68,10 @@
$('#faceContainer').append(canvas)
const x = 20, y = canvas.height - 20
faceapi.drawText(
canvas.getContext('2d'),
x,
y,
faceMatcher.findBestMatch(descriptor).toString(),
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
const ctx = faceapi.getContext2dOrThrow(canvas)
ctx.font = '16px Georgia'
ctx.fillStyle = 'red'
ctx.fillText(faceMatcher.findBestMatch(descriptor).toString(), x, y)
}
async function runComputeFaceDescriptors(useBatchInput) {
......
@@ -43,12 +43,12 @@
const canvas = faceapi.createCanvasFromMedia(currentImg)
$('#faceContainer').empty()
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: drawLines ? 2 : 4, drawLines })
new faceapi.draw.DrawFaceLandmarks(landmarks, { drawLines }).draw(canvas)
}
async function onSelectionChanged(uri) {
currentImg = await faceapi.fetchImage(uri)
landmarks = await faceapi.detectLandmarks(currentImg)
landmarks = await faceapi.detectFaceLandmarks(currentImg)
redraw()
}
......
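Note the rename in the hunk above: faceapi.detectLandmarks becomes faceapi.detectFaceLandmarks. Callers migrate one-to-one (img is assumed to be a loaded image element):
const landmarks = await faceapi.detectFaceLandmarks(img) // formerly detectLandmarks
new faceapi.draw.DrawFaceLandmarks(landmarks).draw(canvas)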
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<script src="js/bbt.js"></script>
@@ -159,17 +158,19 @@
function drawFaceRecognitionResults(results) {
const canvas = $('#overlay').get(0)
const inputImgEl = $('#inputImg').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults($('#inputImg').get(0), canvas, results)
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function run() {
......
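faceapi.BoxWithText is gone; its replacement is faceapi.draw.DrawBox, which takes the box plus an options object. Only the label option is exercised in this diff, so this sketch sticks to it:
const label = faceMatcher.findBestMatch(descriptor).toString()
new faceapi.draw.DrawBox(detection.box, { label }).draw(canvas)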
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
@@ -148,7 +147,9 @@
const results = await faceapi.detectAllFaces(inputImgEl, options)
drawDetections(inputImgEl, $('#overlay').get(0), results)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, inputImgEl))
}
async function run() {
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
@@ -150,7 +149,14 @@
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceExpressions()
drawExpressions(inputImgEl, $('#overlay').get(0), results, thresh, true)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
const minConfidence = 0.05
faceapi.draw.drawDetections(canvas, resizedResults)
faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
}
async function run() {
......
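faceapi.draw.drawFaceExpressions accepts an optional minConfidence (its implementation appears further down in this diff): expressions whose probability does not exceed the threshold are filtered out before rendering. Sketch:
const resized = faceapi.resizeResults(results, inputImgEl)
faceapi.draw.drawDetections(canvas, resized)
faceapi.draw.drawFaceExpressions(canvas, resized, 0.05) // drop expressions with probability <= 0.05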
@@ -150,9 +150,7 @@
function displayExtractedFaces(faceImages) {
const canvas = $('#overlay').get(0)
const { width, height } = $('#inputImg').get(0)
canvas.width = width
canvas.height = height
faceapi.matchDimensions(canvas, $('#inputImg').get(0))
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
@@ -162,7 +161,14 @@
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceLandmarks()
drawLandmarks(inputImgEl, $('#overlay').get(0), results, withBoxes)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
async function run() {
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
@@ -205,11 +204,11 @@
}
async function updateReferenceImageResults() {
const imgEl = $('#refImg').get(0)
const inputImgEl = $('#refImg').get(0)
const canvas = $('#refImgOverlay').get(0)
const fullFaceDescriptions = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
@@ -221,16 +220,19 @@
// from the detection results for the reference image
faceMatcher = new faceapi.FaceMatcher(fullFaceDescriptions)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, fullFaceDescriptions)
const resizedResults = faceapi.resizeResults(fullFaceDescriptions, inputImgEl)
// draw boxes with the corresponding label as text
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const boxesWithText = resizedResults
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
faceapi.drawDetection(canvas, boxesWithText)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateQueryImageResults() {
@@ -238,27 +240,25 @@
return
}
const imgEl = $('#queryImg').get(0)
const inputImgEl = $('#queryImg').get(0)
const canvas = $('#queryImgOverlay').get(0)
const results = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, results)
// draw boxes with the corresponding label as text
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
// match each face descriptor to the reference descriptor
// with lowest euclidean distance and display the result as text
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateResults() {
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
@@ -169,18 +168,25 @@
const ts = Date.now()
const faceDetectionTask = faceapi.detectAllFaces(videoEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const drawBoxes = withBoxes
const drawLandmarks = withFaceLandmarks
let task = faceapi.detectAllFaces(videoEl, options)
task = withFaceLandmarks ? task.withFaceLandmarks() : task
const results = await task
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
drawFunction(videoEl, $('#overlay').get(0), results, withBoxes)
const resizedResults = faceapi.resizeResults(results, dims)
if (drawBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
if (drawLandmarks) {
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
setTimeout(() => onPlay(videoEl))
}
......
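For video input, the examples now pass a third argument to matchDimensions and feed the returned dimensions into resizeResults. Presumably the flag tells matchDimensions to use the media's intrinsic dimensions (videoWidth/videoHeight) rather than the element's layout size; hedged sketch of the per-frame pattern used in the hunks below:
const canvas = document.getElementById('overlay')
const dims = faceapi.matchDimensions(canvas, videoEl, true) // true: use media dimensions
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(result, dims))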
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
@@ -159,7 +158,9 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawDetections(videoEl, $('#overlay').get(0), [result])
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(result, dims))
}
setTimeout(() => onPlay())
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
@@ -171,7 +170,15 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawExpressions(videoEl, $('#overlay').get(0), [result], withBoxes)
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, dims)
const minConfidence = 0.05
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResult)
}
faceapi.draw.drawFaceExpressions(canvas, resizedResult, minConfidence)
}
setTimeout(() => onPlay())
......
@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
@@ -171,7 +170,14 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawLandmarks(videoEl, $('#overlay').get(0), [result], withBoxes)
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, dims)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResult)
}
faceapi.draw.drawFaceLandmarks(canvas, resizedResult)
}
setTimeout(() => onPlay())
......
@@ -10,7 +10,7 @@ async function run() {
const detections = await faceapi.detectAllFaces(img, faceDetectionOptions)
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, detections)
faceapi.draw.drawDetections(out, detections)
saveFile('faceDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceDetection.jpg')
......
@@ -12,8 +12,8 @@ async function run() {
.withFaceExpressions()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection), { withScore: false })
faceapi.drawFaceExpressions(out, results.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceExpressions(out, results)
saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceExpressionRecognition.jpg')
......
@@ -12,8 +12,8 @@ async function run() {
.withFaceLandmarks()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection))
faceapi.drawLandmarks(out, results.map(res => res.landmarks), { drawLines: true, color: 'red' })
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks))
saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceLandmarkDetection.jpg')
......
@@ -26,20 +26,21 @@ async function run() {
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const refBoxesWithText = resultsRef
const refDrawBoxes = resultsRef
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
const outRef = faceapi.createCanvasFromMedia(referenceImage) as any
faceapi.drawDetection(outRef, refBoxesWithText)
saveFile('referenceImage.jpg', outRef.toBuffer('image/jpeg'))
.map((box, i) => new faceapi.draw.DrawBox(box, { label: labels[i] }))
const outRef = faceapi.createCanvasFromMedia(referenceImage)
refDrawBoxes.forEach(drawBox => drawBox.draw(outRef))
const queryBoxesWithText = resultsQuery.map(res => {
saveFile('referenceImage.jpg', (outRef as any).toBuffer('image/jpeg'))
const queryDrawBoxes = resultsQuery.map(res => {
const bestMatch = faceMatcher.findBestMatch(res.descriptor)
return new faceapi.BoxWithText(res.detection.box, bestMatch.toString())
return new faceapi.draw.DrawBox(res.detection.box, { label: bestMatch.toString() })
})
const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
faceapi.drawDetection(outQuery, queryBoxesWithText)
saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
const outQuery = faceapi.createCanvasFromMedia(queryImage)
queryDrawBoxes.forEach(drawBox => drawBox.draw(outQuery))
saveFile('queryImage.jpg', (outQuery as any).toBuffer('image/jpeg'))
console.log('done, saved results to out/queryImage.jpg')
}
......
@@ -1661,14 +1661,14 @@
"dev": true
},
"fsevents": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.7.tgz",
"integrity": "sha512-Pxm6sI2MeBD7RdD12RYsqaP0nMiwx8eZBXCa6z2L+mRHm2DYrOYwihmhjpkdjUHwQhslWQjRpEgNq4XvBmaAuw==",
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.8.tgz",
"integrity": "sha512-tPvHgPGB7m40CZ68xqFGkKuzN+RnpGmSV+hgeKxhRpbxdqKXUFJGC3yonBOLzQBcJyGpdZFDfCsdOC2KFsXzeA==",
"dev": true,
"optional": true,
"requires": {
"nan": "^2.9.2",
"node-pre-gyp": "^0.10.0"
"nan": "^2.12.1",
"node-pre-gyp": "^0.12.0"
},
"dependencies": {
"abbrev": {
@@ -1740,12 +1740,12 @@
"optional": true
},
"debug": {
"version": "2.6.9",
"version": "4.1.1",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"ms": "2.0.0"
"ms": "^2.1.1"
}
},
"deep-extend": {
@@ -1910,24 +1910,31 @@
}
},
"ms": {
"version": "2.0.0",
"version": "2.1.1",
"bundled": true,
"dev": true,
"optional": true
},
"nan": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.13.2.tgz",
"integrity": "sha512-TghvYc72wlMGMVMluVo9WRJc0mB8KxxF/gZ4YYFy7V2ZQX9l7rgbPg7vjS9mt6U5HXODVFVI2bOduCzwOMv/lw==",
"dev": true,
"optional": true
},
"needle": {
"version": "2.2.4",
"version": "2.3.0",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"debug": "^2.1.2",
"debug": "^4.1.0",
"iconv-lite": "^0.4.4",
"sax": "^1.2.4"
}
},
"node-pre-gyp": {
"version": "0.10.3",
"version": "0.12.0",
"bundled": true,
"dev": true,
"optional": true,
@@ -1955,13 +1962,13 @@
}
},
"npm-bundled": {
"version": "1.0.5",
"version": "1.0.6",
"bundled": true,
"dev": true,
"optional": true
},
"npm-packlist": {
"version": "1.2.0",
"version": "1.4.1",
"bundled": true,
"dev": true,
"optional": true,
@@ -2097,7 +2104,7 @@
"optional": true
},
"semver": {
"version": "5.6.0",
"version": "5.7.0",
"bundled": true,
"dev": true,
"optional": true
@@ -4862,9 +4869,9 @@
}
},
"tfjs-image-recognition-base": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.5.1.tgz",
"integrity": "sha512-xk1feiuWiX56PZ4sK20rcVvqwPXdxzAV3TDCdeCQV/yPYDyq1lU98JBDUliX1g6o8jL5v4f6yyn3A5tq9kbCpg==",
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.6.0.tgz",
"integrity": "sha512-wFk3ivWjdQwsXgEfU1PdTf3smve2AbCjiwJKrq9lDGmKh75aL8UIy0bVNa15r+8sFaT4vJz/9AKOSI0w78wW0g==",
"requires": {
"@tensorflow/tfjs-core": "1.0.3",
"tslib": "^1.9.3"
......
@@ -14,7 +14,8 @@
"test": "karma start",
"test-browser": "karma start --single-run",
"test-node": "ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-all": "npm run test-browser && npm run test-node",
"test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed",
"test-all-include-uncompressed": "npm run test-browser && npm run test-node",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
@@ -22,6 +23,7 @@
"test-mtcnn": "set UUT=mtcnn&& karma start",
"test-cpu": "set BACKEND_CPU=true&& karma start",
"test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start",
"test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"docs": "typedoc --options ./typedoc.config.js ./src"
},
@@ -36,7 +38,7 @@
"license": "MIT",
"dependencies": {
"@tensorflow/tfjs-core": "1.0.3",
"tfjs-image-recognition-base": "^0.5.1",
"tfjs-image-recognition-base": "^0.6.0",
"tslib": "^1.9.3"
},
"devDependencies": {
......
@@ -15,6 +15,7 @@ export class FaceDetection extends ObjectDetection implements IFaceDetecion {
}
public forSize(width: number, height: number): FaceDetection {
return super.forSize(width, height)
const { score, relativeBox, imageDims } = super.forSize(width, height)
return new FaceDetection(score, relativeBox, imageDims)
}
}
\ No newline at end of file
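The change above makes FaceDetection.forSize return a FaceDetection again instead of the plain ObjectDetection produced by super.forSize, so resized results keep their subclass. A sketch of the behavior this restores (canvas dimensions assumed):
const resized = detection.forSize(canvas.width, canvas.height)
console.log(resized instanceof faceapi.FaceDetection) // true again after this fix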
import { drawText, env, getContext2dOrThrow, getDefaultDrawOptions, resolveInput, round } from 'tfjs-image-recognition-base';
import { IRect } from 'tfjs-image-recognition-base';
import { DrawFaceExpressionsInput, DrawFaceExpressionsOptions } from './types';
export function drawFaceExpressions(
canvasArg: string | HTMLCanvasElement,
faceExpressions: DrawFaceExpressionsInput | DrawFaceExpressionsInput[],
options?: DrawFaceExpressionsOptions
) {
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof env.getEnv().Canvas)) {
throw new Error('drawFaceExpressions - expected canvas to be of type: HTMLCanvasElement')
}
const drawOptions = Object.assign(
getDefaultDrawOptions(options),
(options || {})
)
const ctx = getContext2dOrThrow(canvas)
const {
primaryColor = 'red',
secondaryColor = 'blue',
primaryFontSize = 22,
secondaryFontSize = 16,
minConfidence = 0.2
} = drawOptions
const faceExpressionsArray = Array.isArray(faceExpressions)
? faceExpressions
: [faceExpressions]
faceExpressionsArray.forEach(({ position, expressions }) => {
const { x, y } = position
const height = (position as IRect).height || 0
const sorted = expressions.sort((a, b) => b.probability - a.probability)
const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)
let offset = (y + height + resultsToDisplay.length * primaryFontSize) > canvas.height
? -(resultsToDisplay.length * primaryFontSize)
: 0
resultsToDisplay.forEach((expr, i) => {
const text = `${expr.expression} (${round(expr.probability)})`
drawText(
ctx,
x,
y + height + (i * primaryFontSize) + offset,
text,
{
textColor: i === 0 ? primaryColor : secondaryColor,
fontSize: i === 0 ? primaryFontSize : secondaryFontSize
}
)
})
})
}
\ No newline at end of file
import { env, getContext2dOrThrow, getDefaultDrawOptions, resolveInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { drawContour } from './drawContour';
import { DrawLandmarksOptions } from './types';
export function drawLandmarks(
canvasArg: string | HTMLCanvasElement,
faceLandmarks: FaceLandmarks | FaceLandmarks[],
options?: DrawLandmarksOptions
) {
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof env.getEnv().Canvas)) {
throw new Error('drawLandmarks - expected canvas to be of type: HTMLCanvasElement')
}
const drawOptions = Object.assign(
getDefaultDrawOptions(options),
(options || {})
)
const { drawLines } = Object.assign({ drawLines: false }, (options || {}))
const ctx = getContext2dOrThrow(canvas)
const { lineWidth, color = 'blue' } = drawOptions
const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks]
faceLandmarksArray.forEach(landmarks => {
if (drawLines && landmarks instanceof FaceLandmarks68) {
ctx.strokeStyle = color
ctx.lineWidth = lineWidth
drawContour(ctx, landmarks.getJawOutline())
drawContour(ctx, landmarks.getLeftEyeBrow())
drawContour(ctx, landmarks.getRightEyeBrow())
drawContour(ctx, landmarks.getNose())
drawContour(ctx, landmarks.getLeftEye(), true)
drawContour(ctx, landmarks.getRightEye(), true)
drawContour(ctx, landmarks.getMouth(), true)
return
}
// else draw points
const ptOffset = lineWidth / 2
ctx.fillStyle = color
landmarks.positions.forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
})
}
\ No newline at end of file
export * from './drawContour'
export * from './drawLandmarks'
export * from './drawFaceExpressions'
export * from './extractFaces'
export * from './extractFaceTensors'
export * from './types'
\ No newline at end of file
export * from './extractFaceTensors'
\ No newline at end of file
import { IPoint, IRect } from 'tfjs-image-recognition-base';
import { WithFaceExpressions } from '../factories/WithFaceExpressions';
export type DrawLandmarksOptions = {
lineWidth?: number
color?: string
drawLines?: boolean
}
export type DrawFaceExpressionsOptions = {
primaryColor?: string
secondaryColor?: string
primaryFontSize?: number
secondaryFontSize?: number
minConfidence?: number
}
export type DrawFaceExpressionsInput = WithFaceExpressions<{
position: IPoint | IRect
}>
\ No newline at end of file
import { getContext2dOrThrow, IPoint } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { drawContour } from './drawContour';
export interface IDrawFaceLandmarksOptions {
drawLines?: boolean
drawPoints?: boolean
lineWidth?: number
pointSize?: number
lineColor?: string
pointColor?: string
}
export class DrawFaceLandmarksOptions {
public drawLines: boolean
public drawPoints: boolean
public lineWidth: number
public pointSize: number
public lineColor: string
public pointColor: string
constructor(options: IDrawFaceLandmarksOptions = {}) {
const { drawLines = true, drawPoints = true, lineWidth, lineColor, pointSize, pointColor } = options
this.drawLines = drawLines
this.drawPoints = drawPoints
this.lineWidth = lineWidth || 1
this.pointSize = pointSize || 2
this.lineColor = lineColor || 'rgba(0, 255, 255, 1)'
this.pointColor = pointColor || 'rgba(255, 0, 255, 1)'
}
}
export class DrawFaceLandmarks {
public faceLandmarks: FaceLandmarks
public options: DrawFaceLandmarksOptions
constructor(
faceLandmarks: FaceLandmarks,
options: IDrawFaceLandmarksOptions = {}
) {
this.faceLandmarks = faceLandmarks
this.options = new DrawFaceLandmarksOptions(options)
}
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {
const ctx = getContext2dOrThrow(canvasArg)
const { drawLines, drawPoints, lineWidth, lineColor, pointSize, pointColor } = this.options
if (drawLines && this.faceLandmarks instanceof FaceLandmarks68) {
ctx.strokeStyle = lineColor
ctx.lineWidth = lineWidth
drawContour(ctx, this.faceLandmarks.getJawOutline())
drawContour(ctx, this.faceLandmarks.getLeftEyeBrow())
drawContour(ctx, this.faceLandmarks.getRightEyeBrow())
drawContour(ctx, this.faceLandmarks.getNose())
drawContour(ctx, this.faceLandmarks.getLeftEye(), true)
drawContour(ctx, this.faceLandmarks.getRightEye(), true)
drawContour(ctx, this.faceLandmarks.getMouth(), true)
}
if (drawPoints) {
ctx.strokeStyle = pointColor
ctx.fillStyle = pointColor
const drawPoint = (pt: IPoint) => {
ctx.beginPath()
ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI)
ctx.fill()
}
this.faceLandmarks.positions.forEach(drawPoint)
}
}
}
export type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>
export function drawFaceLandmarks(
canvasArg: string | HTMLCanvasElement,
faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>
) {
const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks]
faceLandmarksArray.forEach(f => {
const landmarks = f instanceof FaceLandmarks
? f
: (isWithFaceLandmarks(f) ? f.landmarks : undefined)
if (!landmarks) {
throw new Error('drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>> or array thereof')
}
new DrawFaceLandmarks(landmarks).draw(canvasArg)
})
}
\ No newline at end of file
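A usage sketch for the new class, exercising the options defined above (all option values here are illustrative, matching the documented defaults or simple overrides):
const options = {
  drawLines: true,
  drawPoints: true,
  lineWidth: 2,
  pointSize: 3,
  lineColor: 'rgba(0, 255, 255, 1)',
  pointColor: 'rgba(255, 0, 255, 1)'
}
new faceapi.draw.DrawFaceLandmarks(landmarks, options).draw('overlay') // canvas id or element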
import { Box, draw, IBoundingBox, IRect, round } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
export type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>
export function drawDetections(
canvasArg: string | HTMLCanvasElement,
detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>
) {
const detectionsArray = Array.isArray(detections) ? detections : [detections]
detectionsArray.forEach(det => {
const score = det instanceof FaceDetection
? det.score
: (isWithFaceDetection(det) ? det.detection.score : undefined)
const box = det instanceof FaceDetection
? det.box
: (isWithFaceDetection(det) ? det.detection.box : new Box(det))
const label = score ? `${round(score)}` : undefined
new draw.DrawBox(box, { label }).draw(canvasArg)
})
}
\ No newline at end of file
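Per TDrawDetectionsInput above, drawDetections accepts plain rects and bounding boxes as well as detection results, labeling each box with its rounded score when one is available. Sketch:
faceapi.draw.drawDetections(canvas, { x: 50, y: 60, width: 120, height: 140 }) // bare IRect, no score label
faceapi.draw.drawDetections(canvas, detections) // FaceDetection results get score labels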
import { draw, IPoint, Point, round } from 'tfjs-image-recognition-base';
import { FaceExpressions } from '../faceExpressionNet';
import { isWithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
export type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>
export function drawFaceExpressions(
canvasArg: string | HTMLCanvasElement,
faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>,
minConfidence = 0.1,
textFieldAnchor?: IPoint
) {
const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions]
faceExpressionsArray.forEach(e => {
const expr = e instanceof FaceExpressions
? e
: (isWithFaceExpressions(e) ? e.expressions : undefined)
if (!expr) {
throw new Error('drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof')
}
const sorted = expr.asSortedArray()
const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)
const anchor = isWithFaceDetection(e)
? e.detection.box.bottomLeft
: (textFieldAnchor || new Point(0, 0))
const drawTextField = new draw.DrawTextField(
resultsToDisplay.map(expr => `${expr.expression} (${round(expr.probability)})`),
anchor
)
drawTextField.draw(canvasArg)
})
}
\ No newline at end of file
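When a result also carries a face detection, the text field anchors at the detection box's bottom-left corner; otherwise textFieldAnchor (or the origin) is used. Sketch (Point is re-exported from tfjs-image-recognition-base):
faceapi.draw.drawFaceExpressions(canvas, resultsWithDetections, 0.1) // anchored under each face box
faceapi.draw.drawFaceExpressions(canvas, bareExpressions, 0.1, new faceapi.Point(30, 30)) // explicit anchor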
export * from './drawContour'
export * from './drawDetections'
export * from './drawFaceExpressions'
export * from './DrawFaceLandmarks'
\ No newline at end of file
@@ -4,29 +4,10 @@ import { NetInput, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
import { FaceExpression, faceExpressionLabels, FaceExpressionPrediction } from './types';
import { FaceExpressions } from './FaceExpressions';
export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
public static getFaceExpressionLabel(faceExpression: string) {
const label = faceExpressionLabels[faceExpression]
if (typeof label !== 'number') {
throw new Error(`getFaceExpressionLabel - no label for faceExpression: ${faceExpression}`)
}
return label
}
public static decodeProbabilites(probabilities: number[] | Float32Array): FaceExpressionPrediction[] {
if (probabilities.length !== 7) {
throw new Error(`decodeProbabilites - expected probabilities.length to be 7, have: ${probabilities.length}`)
}
return (Object.keys(faceExpressionLabels) as FaceExpression[])
.map(expression => ({ expression, probability: probabilities[faceExpressionLabels[expression]] }))
}
constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {
super('FaceExpressionNet', faceFeatureExtractor)
}
@@ -50,7 +31,7 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
out.dispose()
const predictionsByBatch = probabilitesByBatch
.map(propablities => FaceExpressionNet.decodeProbabilites(propablities as Float32Array))
.map(probabilites => new FaceExpressions(probabilites as Float32Array))
return netInput.isBatchInput
? predictionsByBatch
......
export const FACE_EXPRESSION_LABELS = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised']
export class FaceExpressions {
public neutral: number
public happy: number
public sad: number
public angry: number
public fearful: number
public disgusted: number
public surprised: number
constructor(probabilities: number[] | Float32Array) {
if (probabilities.length !== 7) {
throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`)
}
FACE_EXPRESSION_LABELS.forEach((expression, idx) => {
this[expression] = probabilities[idx]
})
}
asSortedArray() {
return FACE_EXPRESSION_LABELS
.map(expression => ({ expression, probability: this[expression] as number }))
.sort((e0, e1) => e1.probability - e0.probability)
}
}
\ No newline at end of file
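The new FaceExpressions class exposes each probability as a named property and can be flattened back to the old sorted-array shape via asSortedArray. A sketch of picking the dominant expression (a single input presumably yields a single FaceExpressions, per the typing change further down):
const expressions = await faceapi.recognizeFaceExpressions(img)
console.log(expressions.happy) // direct property access
const [best] = expressions.asSortedArray()
console.log(`${best.expression} (${best.probability})`)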
export * from './FaceExpressionNet';
export * from './types';
\ No newline at end of file
export * from './FaceExpressions';
\ No newline at end of file
export const faceExpressionLabels = {
neutral: 0,
happy: 1,
sad: 2,
angry: 3,
fearful: 4,
disgusted: 5,
surprised: 6
}
export type FaceExpression = 'neutral' | 'happy' | 'sad' | 'angry' | 'fearful' | 'disgusted' | 'surprised'
export type FaceExpressionPrediction = {
expression: FaceExpression,
probability: number
}
@@ -4,6 +4,10 @@ export type WithFaceDetection<TSource> = TSource & {
detection: FaceDetection
}
export function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> {
return obj['detection'] instanceof FaceDetection
}
export function extendWithFaceDetection<
TSource
> (
......
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
export type WithFaceExpressions<TSource> = TSource & {
expressions: FaceExpressionPrediction[]
expressions: FaceExpressions
}
export function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}> {
return obj['expressions'] instanceof FaceExpressions
}
export function extendWithFaceExpressions<
TSource
> (
sourceObj: TSource,
expressions: FaceExpressionPrediction[]
expressions: FaceExpressions
): WithFaceExpressions<TSource> {
const extension = { expressions }
......
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from './WithFaceDetection';
import { isWithFaceDetection, WithFaceDetection } from './WithFaceDetection';
export type WithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
@@ -12,6 +12,13 @@ export type WithFaceLandmarks<
alignedRect: FaceDetection
}
export function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks> {
return isWithFaceDetection(obj)
&& obj['landmarks'] instanceof FaceLandmarks
&& obj['unshiftedLandmarks'] instanceof FaceLandmarks
&& obj['alignedRect'] instanceof FaceDetection
}
export function extendWithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
......
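The new isWithFaceDetection / isWithFaceLandmarks type guards let generic code narrow arbitrary result shapes. A sketch, assuming the factory helpers are re-exported on the faceapi namespace (the resizeResults module below imports them from the factories):
function describe(result) {
  if (faceapi.isWithFaceLandmarks(result)) {
    return `${result.landmarks.positions.length} landmark points`
  }
  if (faceapi.isWithFaceDetection(result)) {
    return `detection score ${result.detection.score}`
  }
  return 'unrecognized result shape'
}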
@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { ComposableTask } from './ComposableTask';
@@ -33,7 +33,7 @@ export class PredictAllFaceExpressionsTask<
const faceExpressionsByFace = await Promise.all(faces.map(
face => nets.faceExpressionNet.predictExpressions(face)
)) as FaceExpressionPrediction[][]
)) as FaceExpressions[]
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
@@ -63,7 +63,7 @@ export class PredictSingleFaceExpressionTask<
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
const faceExpressions = await nets.faceExpressionNet.predictExpressions(faces[0]) as FaceExpressionPrediction[]
const faceExpressions = await nets.faceExpressionNet.predictExpressions(faces[0]) as FaceExpressions
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
......
@@ -4,7 +4,7 @@ import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';
@@ -114,7 +114,7 @@ export const computeFaceDescriptor = (input: TNetInput): Promise<Float32Array |
* also be an array of input images, which will be batch processed.
* @returns An array of facial expressions with corresponding probabilities or array thereof in case of batch input.
*/
export const recognizeFaceExpressions = (input: TNetInput): Promise<FaceExpressionPrediction[] | FaceExpressionPrediction[][]> =>
export const recognizeFaceExpressions = (input: TNetInput): Promise<FaceExpressions | FaceExpressions[]> =>
nets.faceExpressionNet.predictExpressions(input)
export const loadSsdMobilenetv1Model = (url: string) => nets.ssdMobilenetv1.load(url)
......
import * as tf from '@tensorflow/tfjs-core';
import { draw as drawBase } from 'tfjs-image-recognition-base';
import * as drawExtended from './draw';
export {
tf
@@ -7,6 +10,9 @@ export {
export * from 'tfjs-image-recognition-base';
export * from './ageGenderNet/index';
const draw = {...drawBase, ...drawExtended }
export { draw }
export * from './classes/index';
export * from './dom/index'
export * from './faceExpressionNet/index';
......
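This is where the merged faceapi.draw namespace is assembled: the drawing primitives from tfjs-image-recognition-base (e.g. DrawBox, DrawTextField) are spread together with the face-specific helpers from ./draw. A sketch of what the merged namespace exposes:
faceapi.draw.drawFaceLandmarks(canvas, resized) // face-specific helper from ./draw
new faceapi.draw.DrawBox(box, { label: 'face' }).draw(canvas) // primitive from the base package
new faceapi.draw.DrawTextField(['hello'], box.bottomLeft).draw(canvas) // primitive from the base package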
import { IDimensions } from 'tfjs-image-recognition-base';
import { Dimensions, IDimensions } from 'tfjs-image-recognition-base';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks } from './classes/FaceLandmarks';
import { extendWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks } from './factories/WithFaceLandmarks';
import { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks, isWithFaceLandmarks } from './factories/WithFaceLandmarks';
export function resizeResults<T>(results: T, { width, height }: IDimensions): T {
export function resizeResults<T>(results: T, dimensions: IDimensions): T {
const { width, height } = new Dimensions(dimensions.width, dimensions.height)
if (width <= 0 || height <= 0) {
throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`)
}
if (Array.isArray(results)) {
return results.map(obj => resizeResults(obj, { width, height })) as any as T
}
const hasLandmarks = results['unshiftedLandmarks'] && results['unshiftedLandmarks'] instanceof FaceLandmarks
const hasDetection = results['detection'] && results['detection'] instanceof FaceDetection
if (hasLandmarks) {
const resizedDetection = results['detection'].forSize(width, height)
const resizedLandmarks = results['unshiftedLandmarks'].forSize(resizedDetection.box.width, resizedDetection.box.height)
if (isWithFaceLandmarks(results)) {
const resizedDetection = results.detection.forSize(width, height)
const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height)
return extendWithFaceLandmarks(extendWithFaceDetection(results as any, resizedDetection), resizedLandmarks)
return extendWithFaceLandmarks(extendWithFaceDetection(results, resizedDetection), resizedLandmarks)
}
if (hasDetection) {
return extendWithFaceDetection(results as any, results['detection'].forSize(width, height))
if (isWithFaceDetection(results)) {
return extendWithFaceDetection(results, results.detection.forSize(width, height))
}
if (results instanceof FaceLandmarks || results instanceof FaceDetection) {
......
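resizeResults now validates its dimensions up front and relies on the type guards above instead of raw property checks. Anything exposing width and height satisfies IDimensions, which is why the examples pass media elements directly; sketch:
faceapi.resizeResults(results, { width: 640, height: 480 }) // plain IDimensions
faceapi.resizeResults(results, inputImgEl) // an element's width/height also satisfies IDimensions
// zero or negative dimensions now throw instead of passing through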
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { loadImage } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
import { FaceExpressions } from '../../../src/faceExpressionNet/FaceExpressions';
describeWithBackend('faceExpressionNet', () => {
@@ -18,15 +18,9 @@ describeWithBackend('faceExpressionNet', () => {
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions', async () => {
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressionPrediction[]
expect(Array.isArray(result)).toBe(true)
expect(result.length).toEqual(7)
const angry = result.find(res => res.expression === 'angry') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressions
expect(result instanceof FaceExpressions).toBe(true)
expect(result.angry).toBeGreaterThan(0.95)
})
})
@@ -36,70 +30,43 @@ describeWithBackend('faceExpressionNet', () => {
it('recognizes facial expressions for batch of image elements', async () => {
const inputs = [imgElAngry, imgElSurprised]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
const inputs = [imgElAngry, imgElSurprised].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of mixed inputs', async () => {
const inputs = [imgElAngry, tf.browser.fromPixels(createCanvasFromMedia(imgElSurprised))]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
})
......
@@ -6,21 +6,17 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
function expectFaceExpressions(results: WithFaceExpressions<{}>[]) {
results.forEach((result, i) => {
const happy = result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction
const neutral = result.expressions.find(res => res.expression === 'neutral') as FaceExpressionPrediction
const { happy, neutral } = result.expressions
const happyProb = i === 4 ? 0 : 0.95
const neutralProb = i === 4 ? 0.4 : 0
expect(happy).not.toBeUndefined()
expect(neutral).not.toBeUndefined()
expect(happy.probability).toBeGreaterThanOrEqual(happyProb)
expect(neutral.probability).toBeGreaterThanOrEqual(neutralProb)
expect(happy).toBeGreaterThanOrEqual(happyProb)
expect(neutral).toBeGreaterThanOrEqual(neutralProb)
})
}
@@ -184,8 +180,7 @@ describeWithBackend('tinyFaceDetector', () => {
deltas.maxScoreDelta,
deltas.maxBoxDelta
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
result && expect(result.expressions.happy).toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceExpressions().withFaceLandmarks()', async () => {
@@ -205,8 +200,7 @@ describeWithBackend('tinyFaceDetector', () => {
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
result && expect(result.expressions.happy).toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
@@ -246,8 +240,7 @@ describeWithBackend('tinyFaceDetector', () => {
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
result && expect(result.expressions.happy).toBeGreaterThanOrEqual(0.95)
})
})
......