Unverified commit e1d7085e authored by Vincent Mühler, committed by GitHub

Merge pull request #287 from justadudewhohacks/age-gender

age and gender recognition
parents 2d885bb7 49114d20
sudo: required
language: node_js
node_js:
- "node"
#- "node"
- "11"
- "10"
- "8"
# node 6 is not compatible with tfjs-node
......
......@@ -36,6 +36,10 @@ function renderNavBar(navbarId, exampleUri) {
uri: 'face_expression_recognition',
name: 'Face Expression Recognition'
},
{
uri: 'age_and_gender_recognition',
name: 'Age and Gender Recognition'
},
{
uri: 'face_recognition',
name: 'Face Recognition'
......@@ -60,6 +64,10 @@ function renderNavBar(navbarId, exampleUri) {
uri: 'webcam_face_expression_recognition',
name: 'Webcam Face Expression Recognition'
},
{
uri: 'webcam_age_and_gender_recognition',
name: 'Webcam Age and Gender Recognition'
},
{
uri: 'bbt_face_landmark_detection',
name: 'BBT Face Landmark Detection'
......
function resizeCanvasAndResults(dimensions, canvas, results) {
const { width, height } = dimensions instanceof HTMLVideoElement
? faceapi.getMediaDimensions(dimensions)
: dimensions
canvas.width = width
canvas.height = height
// resize detections (and landmarks) in case displayed image is smaller than
// original size
return faceapi.resizeResults(results, { width, height })
}
function drawDetections(dimensions, canvas, detections) {
const resizedDetections = resizeCanvasAndResults(dimensions, canvas, detections)
faceapi.drawDetection(canvas, resizedDetections)
}
function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection))
}
const faceLandmarks = resizedResults.map(det => det.landmarks)
const drawLandmarksOptions = {
lineWidth: 2,
drawLines: true,
color: 'green'
}
faceapi.drawLandmarks(canvas, faceLandmarks, drawLandmarksOptions)
}
function drawExpressions(dimensions, canvas, results, thresh, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection), { withScore: false })
}
faceapi.drawFaceExpressions(canvas, resizedResults.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
}
\ No newline at end of file
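This PR deletes these drawing helpers; the views below now size their overlay with faceapi.matchDimensions and render through the faceapi.draw namespace instead. A minimal sketch of the replacement pattern, assuming an input element and detection results as in the examples that follow (the element lookups are illustrative):

const canvas = document.getElementById('overlay') as HTMLCanvasElement
const inputImgEl = document.getElementById('inputImg') as HTMLImageElement
faceapi.matchDimensions(canvas, inputImgEl)                // size the canvas to the media element
const resized = faceapi.resizeResults(results, inputImgEl) // rescale boxes and landmarks to match
faceapi.draw.drawDetections(canvas, resized)
faceapi.draw.drawFaceLandmarks(canvas, resized)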
......@@ -19,12 +19,14 @@ app.get('/', (req, res) => res.redirect('/face_detection'))
app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
app.get('/face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarkDetection.html')))
app.get('/face_expression_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceExpressionRecognition.html')))
app.get('/age_and_gender_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'ageAndGenderRecognition.html')))
app.get('/face_extraction', (req, res) => res.sendFile(path.join(viewsDir, 'faceExtraction.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/video_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'videoFaceTracking.html')))
app.get('/webcam_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceDetection.html')))
app.get('/webcam_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceLandmarkDetection.html')))
app.get('/webcam_face_expression_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceExpressionRecognition.html')))
app.get('/webcam_age_and_gender_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'webcamAgeAndGenderRecognition.html')))
app.get('/bbt_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceLandmarkDetection.html')))
app.get('/bbt_face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceSimilarity.html')))
app.get('/bbt_face_matching', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceMatching.html')))
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- image_selection_control -->
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<!-- image_selection_control -->
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options)
// compute face landmarks to align faces for better accuracy
.withFaceLandmarks()
.withAgeAndGender()
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
faceapi.draw.drawDetections(canvas, resizedResults)
resizedResults.forEach(result => {
const { age, gender, genderProbability } = result
new faceapi.draw.DrawTextField(
[
`${faceapi.round(age, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(canvas)
})
}
async function run() {
// load face detection and age and gender recognition models
// and load face landmark model for face alignment
await changeFaceDetector(SSD_MOBILENETV1)
await faceapi.loadFaceLandmarkModel('/')
await faceapi.nets.ageGenderNet.load('/')
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'age_and_gender_recognition')
initImageSelectionControls('happy.jpg', true)
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
......@@ -65,7 +65,7 @@
function drawLandmarkCanvas(img, landmarks) {
const canvas = faceapi.createCanvasFromMedia(img)
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: 2 , drawLines: true })
new faceapi.draw.DrawFaceLandmarks(landmarks).draw(canvas)
}
async function runLandmarkDetection(useBatchInput) {
......
......@@ -68,13 +68,10 @@
$('#faceContainer').append(canvas)
const x = 20, y = canvas.height - 20
faceapi.drawText(
canvas.getContext('2d'),
x,
y,
faceMatcher.findBestMatch(descriptor).toString(),
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
const ctx = faceapi.getContext2dOrThrow(canvas)
ctx.font = '16px Georgia'
ctx.fillStyle = 'red'
ctx.fillText(faceMatcher.findBestMatch(descriptor).toString(), x, y)
}
async function runComputeFaceDescriptors(useBatchInput) {
......
......@@ -43,12 +43,12 @@
const canvas = faceapi.createCanvasFromMedia(currentImg)
$('#faceContainer').empty()
$('#faceContainer').append(canvas)
faceapi.drawLandmarks(canvas, landmarks, { lineWidth: drawLines ? 2 : 4, drawLines })
new faceapi.draw.DrawFaceLandmarks(landmarks, { drawLines }).draw(canvas)
}
async function onSelectionChanged(uri) {
currentImg = await faceapi.fetchImage(uri)
landmarks = await faceapi.detectLandmarks(currentImg)
landmarks = await faceapi.detectFaceLandmarks(currentImg)
redraw()
}
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<script src="js/bbt.js"></script>
......@@ -159,17 +158,19 @@
function drawFaceRecognitionResults(results) {
const canvas = $('#overlay').get(0)
const inputImgEl = $('#inputImg').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults($('#inputImg').get(0), canvas, results)
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -148,7 +147,9 @@
const results = await faceapi.detectAllFaces(inputImgEl, options)
drawDetections(inputImgEl, $('#overlay').get(0), results)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, inputImgEl))
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -149,13 +148,25 @@
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceExpressions()
drawExpressions(inputImgEl, $('#overlay').get(0), results, thresh, true)
const results = await faceapi.detectAllFaces(inputImgEl, options)
// compute face landmarks to align faces for better accuracy
.withFaceLandmarks()
.withFaceExpressions()
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
const minConfidence = 0.05
faceapi.draw.drawDetections(canvas, resizedResults)
faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
}
async function run() {
// load face detection and face expression recognition models
// and load face landmark model for face alignment
await changeFaceDetector(SSD_MOBILENETV1)
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadFaceExpressionModel('/')
// start processing image
updateResults()
......
......@@ -150,9 +150,7 @@
function displayExtractedFaces(faceImages) {
const canvas = $('#overlay').get(0)
const { width, height } = $('#inputImg').get(0)
canvas.width = width
canvas.height = height
faceapi.matchDimensions(canvas, $('#inputImg').get(0))
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
......@@ -162,7 +161,14 @@
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceLandmarks()
drawLandmarks(inputImgEl, $('#overlay').get(0), results, withBoxes)
const canvas = $('#overlay').get(0)
faceapi.matchDimensions(canvas, inputImgEl)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
async function run() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -205,11 +204,11 @@
}
async function updateReferenceImageResults() {
const imgEl = $('#refImg').get(0)
const inputImgEl = $('#refImg').get(0)
const canvas = $('#refImgOverlay').get(0)
const fullFaceDescriptions = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
......@@ -221,16 +220,19 @@
// from the detection results for the reference image
faceMatcher = new faceapi.FaceMatcher(fullFaceDescriptions)
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, fullFaceDescriptions)
const resizedResults = faceapi.resizeResults(fullFaceDescriptions, inputImgEl)
// draw boxes with the corresponding label as text
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const boxesWithText = resizedResults
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
faceapi.drawDetection(canvas, boxesWithText)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateQueryImageResults() {
......@@ -238,27 +240,25 @@
return
}
const imgEl = $('#queryImg').get(0)
const inputImgEl = $('#queryImg').get(0)
const canvas = $('#queryImgOverlay').get(0)
const results = await faceapi
.detectAllFaces(imgEl, getFaceDetectorOptions())
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
resizedResults = resizeCanvasAndResults(imgEl, canvas, results)
// draw boxes with the corresponding label as text
const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
new faceapi.BoxWithText(
detection.box,
// match each face descriptor to the reference descriptor
// with lowest euclidean distance and display the result as text
faceMatcher.findBestMatch(descriptor).toString()
)
)
faceapi.drawDetection(canvas, boxesWithText)
const resizedResults = faceapi.resizeResults(results, inputImgEl)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateResults() {
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -169,18 +168,25 @@
const ts = Date.now()
const faceDetectionTask = faceapi.detectAllFaces(videoEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const drawBoxes = withBoxes
const drawLandmarks = withFaceLandmarks
let task = faceapi.detectAllFaces(videoEl, options)
task = withFaceLandmarks ? task.withFaceLandmarks() : task
const results = await task
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
drawFunction(videoEl, $('#overlay').get(0), results, withBoxes)
const resizedResults = faceapi.resizeResults(results, dims)
if (drawBoxes) {
faceapi.draw.drawDetections(canvas, resizedResults)
}
if (drawLandmarks) {
faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}
setTimeout(() => onPlay(videoEl))
}
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onloadedmetadata="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
<!-- check boxes -->
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div>
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="128">128 x 128</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
let forwardTimes = []
let predictedAges = []
let withBoxes = true
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function interpolateAgePredictions(age) {
predictedAges = [age].concat(predictedAges).slice(0, 30)
const avgPredictedAge = predictedAges.reduce((total, a) => total + a) / predictedAges.length
return avgPredictedAge
}
async function onPlay() {
const videoEl = $('#inputVideo').get(0)
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay())
const options = getFaceDetectorOptions()
const ts = Date.now()
const result = await faceapi.detectSingleFace(videoEl, options)
.withAgeAndGender()
updateTimeStats(Date.now() - ts)
if (result) {
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, dims)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResult)
}
const { age, gender, genderProbability } = resizedResult
// interpolate age predictions over the last 30 frames
// to make the displayed age more stable
const interpolatedAge = interpolateAgePredictions(age)
new faceapi.draw.DrawTextField(
[
`${faceapi.round(interpolatedAge, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(canvas)
}
setTimeout(() => onPlay())
}
async function run() {
// load face detection and age and gender recognition models
await changeFaceDetector(TINY_FACE_DETECTOR)
await faceapi.nets.ageGenderNet.load('/')
changeInputSize(224)
// try to access users webcam and stream the images
// to the video element
const stream = await navigator.mediaDevices.getUserMedia({ video: {} })
const videoEl = $('#inputVideo').get(0)
videoEl.srcObject = stream
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_age_and_gender_recognition')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
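The interpolateAgePredictions helper above is a 30-frame moving average; a generic sketch of the same smoothing, with illustrative numbers:

// same smoothing as interpolateAgePredictions above, written generically;
// window size and sample values are illustrative
function smooth(history: number[], next: number, windowSize = 30) {
  const updated = [next, ...history].slice(0, windowSize)
  const avg = updated.reduce((sum, v) => sum + v, 0) / updated.length
  return { history: updated, avg }
}
// e.g. raw ages 31, 29, 33 over three frames display as (31 + 29 + 33) / 3 = 31 years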
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -159,7 +158,9 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawDetections(videoEl, $('#overlay').get(0), [result])
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
faceapi.draw.drawDetections(canvas, faceapi.resizeResults(result, dims))
}
setTimeout(() => onPlay())
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -171,7 +170,15 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawExpressions(videoEl, $('#overlay').get(0), [result], withBoxes)
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, dims)
const minConfidence = 0.05
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResult)
}
faceapi.draw.drawFaceExpressions(canvas, resizedResult, minConfidence)
}
setTimeout(() => onPlay())
......
......@@ -3,7 +3,6 @@
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
......@@ -171,7 +170,14 @@
updateTimeStats(Date.now() - ts)
if (result) {
drawLandmarks(videoEl, $('#overlay').get(0), [result], withBoxes)
const canvas = $('#overlay').get(0)
const dims = faceapi.matchDimensions(canvas, videoEl, true)
const resizedResult = faceapi.resizeResults(result, dims)
if (withBoxes) {
faceapi.draw.drawDetections(canvas, resizedResult)
}
faceapi.draw.drawFaceLandmarks(canvas, resizedResult)
}
setTimeout(() => onPlay())
......
import * as faceapi from 'face-api.js';
import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';
async function run() {
await faceDetectionNet.loadFromDisk('../../weights')
await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights')
await faceapi.nets.ageGenderNet.loadFromDisk('../../weights')
const img = await canvas.loadImage('../images/bbt1.jpg')
const results = await faceapi.detectAllFaces(img, faceDetectionOptions)
.withFaceLandmarks()
.withAgeAndGender()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.draw.drawDetections(out, results.map(res => res.detection))
results.forEach(result => {
const { age, gender, genderProbability } = result
new faceapi.draw.DrawTextField(
[
`${faceapi.round(age, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(out)
})
saveFile('ageAndGenderRecognition.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/ageAndGenderRecognition.jpg')
}
run()
\ No newline at end of file
......@@ -10,7 +10,7 @@ async function run() {
const detections = await faceapi.detectAllFaces(img, faceDetectionOptions)
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, detections)
faceapi.draw.drawDetections(out, detections)
saveFile('faceDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceDetection.jpg')
......
......@@ -5,15 +5,17 @@ import { canvas, faceDetectionNet, faceDetectionOptions, saveFile } from './comm
async function run() {
await faceDetectionNet.loadFromDisk('../../weights')
await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights')
await faceapi.nets.faceExpressionNet.loadFromDisk('../../weights')
const img = await canvas.loadImage('../images/surprised.jpg')
const results = await faceapi.detectAllFaces(img, faceDetectionOptions)
.withFaceLandmarks()
.withFaceExpressions()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection), { withScore: false })
faceapi.drawFaceExpressions(out, results.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceExpressions(out, results)
saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceExpressionRecognition.jpg')
......
......@@ -12,8 +12,8 @@ async function run() {
.withFaceLandmarks()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection))
faceapi.drawLandmarks(out, results.map(res => res.landmarks), { drawLines: true, color: 'red' })
faceapi.draw.drawDetections(out, results.map(res => res.detection))
faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks))
saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceLandmarkDetection.jpg')
......
......@@ -26,20 +26,21 @@ async function run() {
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
const refBoxesWithText = resultsRef
const refDrawBoxes = resultsRef
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
const outRef = faceapi.createCanvasFromMedia(referenceImage) as any
faceapi.drawDetection(outRef, refBoxesWithText)
saveFile('referenceImage.jpg', outRef.toBuffer('image/jpeg'))
.map((box, i) => new faceapi.draw.DrawBox(box, { label: labels[i] }))
const outRef = faceapi.createCanvasFromMedia(referenceImage)
refDrawBoxes.forEach(drawBox => drawBox.draw(outRef))
const queryBoxesWithText = resultsQuery.map(res => {
saveFile('referenceImage.jpg', (outRef as any).toBuffer('image/jpeg'))
const queryDrawBoxes = resultsQuery.map(res => {
const bestMatch = faceMatcher.findBestMatch(res.descriptor)
return new faceapi.BoxWithText(res.detection.box, bestMatch.toString())
return new faceapi.draw.DrawBox(res.detection.box, { label: bestMatch.toString() })
})
const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
faceapi.drawDetection(outQuery, queryBoxesWithText)
saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
const outQuery = faceapi.createCanvasFromMedia(queryImage)
queryDrawBoxes.forEach(drawBox => drawBox.draw(outQuery))
saveFile('queryImage.jpg', (outQuery as any).toBuffer('image/jpeg'))
console.log('done, saved results to out/queryImage.jpg')
}
......
......@@ -1661,14 +1661,14 @@
"dev": true
},
"fsevents": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.7.tgz",
"integrity": "sha512-Pxm6sI2MeBD7RdD12RYsqaP0nMiwx8eZBXCa6z2L+mRHm2DYrOYwihmhjpkdjUHwQhslWQjRpEgNq4XvBmaAuw==",
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.8.tgz",
"integrity": "sha512-tPvHgPGB7m40CZ68xqFGkKuzN+RnpGmSV+hgeKxhRpbxdqKXUFJGC3yonBOLzQBcJyGpdZFDfCsdOC2KFsXzeA==",
"dev": true,
"optional": true,
"requires": {
"nan": "^2.9.2",
"node-pre-gyp": "^0.10.0"
"nan": "^2.12.1",
"node-pre-gyp": "^0.12.0"
},
"dependencies": {
"abbrev": {
......@@ -1740,12 +1740,12 @@
"optional": true
},
"debug": {
"version": "2.6.9",
"version": "4.1.1",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"ms": "2.0.0"
"ms": "^2.1.1"
}
},
"deep-extend": {
......@@ -1910,24 +1910,31 @@
}
},
"ms": {
"version": "2.0.0",
"version": "2.1.1",
"bundled": true,
"dev": true,
"optional": true
},
"nan": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.13.2.tgz",
"integrity": "sha512-TghvYc72wlMGMVMluVo9WRJc0mB8KxxF/gZ4YYFy7V2ZQX9l7rgbPg7vjS9mt6U5HXODVFVI2bOduCzwOMv/lw==",
"dev": true,
"optional": true
},
"needle": {
"version": "2.2.4",
"version": "2.3.0",
"bundled": true,
"dev": true,
"optional": true,
"requires": {
"debug": "^2.1.2",
"debug": "^4.1.0",
"iconv-lite": "^0.4.4",
"sax": "^1.2.4"
}
},
"node-pre-gyp": {
"version": "0.10.3",
"version": "0.12.0",
"bundled": true,
"dev": true,
"optional": true,
......@@ -1955,13 +1962,13 @@
}
},
"npm-bundled": {
"version": "1.0.5",
"version": "1.0.6",
"bundled": true,
"dev": true,
"optional": true
},
"npm-packlist": {
"version": "1.2.0",
"version": "1.4.1",
"bundled": true,
"dev": true,
"optional": true,
......@@ -2097,7 +2104,7 @@
"optional": true
},
"semver": {
"version": "5.6.0",
"version": "5.7.0",
"bundled": true,
"dev": true,
"optional": true
......@@ -4862,9 +4869,9 @@
}
},
"tfjs-image-recognition-base": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.5.1.tgz",
"integrity": "sha512-xk1feiuWiX56PZ4sK20rcVvqwPXdxzAV3TDCdeCQV/yPYDyq1lU98JBDUliX1g6o8jL5v4f6yyn3A5tq9kbCpg==",
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.6.0.tgz",
"integrity": "sha512-wFk3ivWjdQwsXgEfU1PdTf3smve2AbCjiwJKrq9lDGmKh75aL8UIy0bVNa15r+8sFaT4vJz/9AKOSI0w78wW0g==",
"requires": {
"@tensorflow/tfjs-core": "1.0.3",
"tslib": "^1.9.3"
......
......@@ -14,14 +14,18 @@
"test": "karma start",
"test-browser": "karma start --single-run",
"test-node": "ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-all": "npm run test-browser && npm run test-node",
"test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed",
"test-all-include-uncompressed": "npm run test-browser && npm run test-node",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
"test-agegendernet": "set UUT=ageGenderNet&& karma start",
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
"test-tinyfacedetector": "set UUT=tinyFaceDetector&& karma start",
"test-globalapi": "set UUT=globalApi&& karma start",
"test-mtcnn": "set UUT=mtcnn&& karma start",
"test-cpu": "set BACKEND_CPU=true&& karma start",
"test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start",
"test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"docs": "typedoc --options ./typedoc.config.js ./src"
},
......@@ -36,7 +40,7 @@
"license": "MIT",
"dependencies": {
"@tensorflow/tfjs-core": "1.0.3",
"tfjs-image-recognition-base": "^0.5.1",
"tfjs-image-recognition-base": "^0.6.0",
"tslib": "^1.9.3"
},
"devDependencies": {
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { seperateWeightMaps } from '../faceProcessor/util';
import { TinyXception } from '../xception/TinyXception';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { AgeAndGenderPrediction, Gender, NetOutput, NetParams } from './types';
export class AgeGenderNet extends NeuralNetwork<NetParams> {
private _faceFeatureExtractor: TinyXception
constructor(faceFeatureExtractor: TinyXception = new TinyXception(2)) {
super('AgeGenderNet')
this._faceFeatureExtractor = faceFeatureExtractor
}
public get faceFeatureExtractor(): TinyXception {
return this._faceFeatureExtractor
}
public runNet(input: NetInput | tf.Tensor4D): NetOutput {
const { params } = this
if (!params) {
throw new Error(`${this._name} - load model before inference`)
}
return tf.tidy(() => {
const bottleneckFeatures = input instanceof NetInput
? this.faceFeatureExtractor.forwardInput(input)
: input
const pooled = tf.avgPool(bottleneckFeatures, [7, 7], [2, 2], 'valid').as2D(bottleneckFeatures.shape[0], -1)
const age = fullyConnectedLayer(pooled, params.fc.age).as1D()
const gender = fullyConnectedLayer(pooled, params.fc.gender)
return { age, gender }
})
}
public forwardInput(input: NetInput | tf.Tensor4D): NetOutput {
return tf.tidy(() => {
const { age, gender } = this.runNet(input)
return { age, gender: tf.softmax(gender) }
})
}
public async forward(input: TNetInput): Promise<NetOutput> {
return this.forwardInput(await toNetInput(input))
}
public async predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]> {
const netInput = await toNetInput(input)
const out = await this.forwardInput(netInput)
const ages = tf.unstack(out.age)
const genders = tf.unstack(out.gender)
const ageAndGenderTensors = ages.map((ageTensor, i) => ({
ageTensor,
genderTensor: genders[i]
}))
const predictionsByBatch = await Promise.all(
ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => {
const age = (await ageTensor.data())[0]
const probMale = (await genderTensor.data())[0]
const isMale = probMale > 0.5
const gender = isMale ? Gender.MALE : Gender.FEMALE
const genderProbability = isMale ? probMale : (1 - probMale)
ageTensor.dispose()
genderTensor.dispose()
return { age, gender, genderProbability }
})
)
out.age.dispose()
out.gender.dispose()
return netInput.isBatchInput
? predictionsByBatch
: predictionsByBatch[0]
}
protected getDefaultModelName(): string {
return 'age_gender_model'
}
public dispose(throwOnRedispose: boolean = true) {
this.faceFeatureExtractor.dispose(throwOnRedispose)
super.dispose(throwOnRedispose)
}
public loadClassifierParams(weights: Float32Array) {
const { params, paramMappings } = this.extractClassifierParams(weights)
this._params = params
this._paramMappings = paramMappings
}
public extractClassifierParams(weights: Float32Array) {
return extractParams(weights)
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap)
this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap)
return extractParamsFromWeigthMap(classifierMap)
}
protected extractParams(weights: Float32Array) {
const classifierWeightSize = (512 * 1 + 1) + (512 * 2 + 2)
const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize)
const classifierWeights = weights.slice(weights.length - classifierWeightSize)
this.faceFeatureExtractor.extractWeights(featureExtractorWeights)
return this.extractClassifierParams(classifierWeights)
}
}
\ No newline at end of file
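A minimal consumption sketch for the new net, mirroring the browser examples above; the input element and weight location are assumptions:

import * as faceapi from 'face-api.js';

async function run(inputImgEl: HTMLImageElement) {
  // assumes weights served from the app root, as in the browser examples in this PR
  await faceapi.nets.ageGenderNet.load('/')
  // a single (non-batch) input yields one { age, gender, genderProbability }
  const prediction = await faceapi.nets.ageGenderNet.predictAgeAndGender(inputImgEl)
  console.log(prediction)
}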
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { NetParams } from './types';
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
const extractFCParams = TfjsImageRecognitionBase.extractFCParamsFactory(extractWeights, paramMappings)
const age = extractFCParams(512, 1, 'fc/age')
const gender = extractFCParams(512, 2, 'fc/gender')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { fc: { age, gender } }
}
}
\ No newline at end of file
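For orientation: the two heads extracted here account exactly for the classifierWeightSize computed in AgeGenderNet.extractParams above, namely (512 × 1 + 1) + (512 × 2 + 2) = 513 + 1026 = 1539 floats (weights plus biases for the age and gender fully connected layers).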
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { NetParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
function extractFcParams(prefix: string): TfjsImageRecognitionBase.FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
}
const params = {
fc: {
age: extractFcParams('fc/age'),
gender: extractFcParams('fc/gender')
}
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
export * from './AgeGenderNet';
export * from './types';
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
export type AgeAndGenderPrediction = {
age: number
gender: Gender
genderProbability: number
}
export enum Gender {
FEMALE = 'female',
MALE = 'male'
}
export type NetOutput = { age: tf.Tensor1D, gender: tf.Tensor2D }
export type NetParams = {
fc: {
age: TfjsImageRecognitionBase.FCParams
gender: TfjsImageRecognitionBase.FCParams
}
}
\ No newline at end of file
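A short sketch of the new types in use (the values are illustrative):

import { AgeAndGenderPrediction, Gender } from './types';

const prediction: AgeAndGenderPrediction = {
  age: 32,
  gender: Gender.FEMALE,
  genderProbability: 0.97,
}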
......@@ -15,6 +15,7 @@ export class FaceDetection extends ObjectDetection implements IFaceDetecion {
}
public forSize(width: number, height: number): FaceDetection {
return super.forSize(width, height)
const { score, relativeBox, imageDims } = super.forSize(width, height)
return new FaceDetection(score, relativeBox, imageDims)
}
}
\ No newline at end of file
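With this fix, forSize reconstructs a FaceDetection instead of returning the base-class ObjectDetection, so the type survives resizing. A minimal check, with dimensions illustrative:

const resized = detection.forSize(640, 480)
// previously an ObjectDetection; now a FaceDetection again
console.log(resized instanceof FaceDetection) // true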
import { Dimensions, getCenterPoint, IDimensions, Point, Rect } from 'tfjs-image-recognition-base';
import { Box, Dimensions, getCenterPoint, IBoundingBox, IDimensions, IRect, Point, Rect } from 'tfjs-image-recognition-base';
import { minBbox } from '../minBbox';
import { FaceDetection } from './FaceDetection';
// face alignment constants
......@@ -71,16 +72,28 @@ export class FaceLandmarks implements IFaceLandmarks {
* @returns The bounding box of the aligned face.
*/
public align(
detection?: FaceDetection | Rect
): Rect {
detection?: FaceDetection | IRect | IBoundingBox | null,
options: { useDlibAlignment?: boolean, minBoxPadding?: number } = { }
): Box {
if (detection) {
const box = detection instanceof FaceDetection
? detection.box.floor()
: detection
: new Box(detection)
return this.shiftBy(box.x, box.y).align()
return this.shiftBy(box.x, box.y).align(null, options)
}
const { useDlibAlignment, minBoxPadding } = Object.assign({}, { useDlibAlignment: false, minBoxPadding: 0.2 }, options)
if (useDlibAlignment) {
return this.alignDlib()
}
return this.alignMinBbox(minBoxPadding)
}
private alignDlib(): Box {
const centers = this.getRefPointsForAlignment()
const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers
......@@ -97,6 +110,11 @@ export class FaceLandmarks implements IFaceLandmarks {
return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y))
}
private alignMinBbox(padding: number): Box {
const box = minBbox(this.positions)
return box.pad(box.width * padding, box.height * padding)
}
protected getRefPointsForAlignment(): Point[] {
throw new Error('getRefPointsForAlignment not implemented by base class')
}
......
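A sketch of the extended align() API introduced above; the detection and padding value are illustrative:

// default: a padded minimum bounding box around the landmark positions
const box = landmarks.align(detection)
// opt back into the previous dlib-style eye/mouth alignment
const dlibBox = landmarks.align(detection, { useDlibAlignment: true })
// or tune the padding of the min-bbox variant (default is 0.2)
const padded = landmarks.align(detection, { minBoxPadding: 0.3 })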
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
export function loadConvParamsFactory(extractWeightEntry: <T>(originalPath: string, paramRank: number) => T) {
return function(prefix: string): TfjsImageRecognitionBase.ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { filters, bias }
}
}
\ No newline at end of file
import { drawText, env, getContext2dOrThrow, getDefaultDrawOptions, resolveInput, round } from 'tfjs-image-recognition-base';
import { IRect } from 'tfjs-image-recognition-base';
import { DrawFaceExpressionsInput, DrawFaceExpressionsOptions } from './types';
export function drawFaceExpressions(
canvasArg: string | HTMLCanvasElement,
faceExpressions: DrawFaceExpressionsInput | DrawFaceExpressionsInput[],
options?: DrawFaceExpressionsOptions
) {
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof env.getEnv().Canvas)) {
throw new Error('drawFaceExpressions - expected canvas to be of type: HTMLCanvasElement')
}
const drawOptions = Object.assign(
getDefaultDrawOptions(options),
(options || {})
)
const ctx = getContext2dOrThrow(canvas)
const {
primaryColor = 'red',
secondaryColor = 'blue',
primaryFontSize = 22,
secondaryFontSize = 16,
minConfidence = 0.2
} = drawOptions
const faceExpressionsArray = Array.isArray(faceExpressions)
? faceExpressions
: [faceExpressions]
faceExpressionsArray.forEach(({ position, expressions }) => {
const { x, y } = position
const height = (position as IRect).height || 0
const sorted = expressions.sort((a, b) => b.probability - a.probability)
const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)
let offset = (y + height + resultsToDisplay.length * primaryFontSize) > canvas.height
? -(resultsToDisplay.length * primaryFontSize)
: 0
resultsToDisplay.forEach((expr, i) => {
const text = `${expr.expression} (${round(expr.probability)})`
drawText(
ctx,
x,
y + height + (i * primaryFontSize) + offset,
text,
{
textColor: i === 0 ? primaryColor : secondaryColor,
fontSize: i === 0 ? primaryFontSize : secondaryFontSize
}
)
})
})
}
\ No newline at end of file
import { env, getContext2dOrThrow, getDefaultDrawOptions, resolveInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { drawContour } from './drawContour';
import { DrawLandmarksOptions } from './types';
export function drawLandmarks(
canvasArg: string | HTMLCanvasElement,
faceLandmarks: FaceLandmarks | FaceLandmarks[],
options?: DrawLandmarksOptions
) {
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof env.getEnv().Canvas)) {
throw new Error('drawLandmarks - expected canvas to be of type: HTMLCanvasElement')
}
const drawOptions = Object.assign(
getDefaultDrawOptions(options),
(options || {})
)
const { drawLines } = Object.assign({ drawLines: false }, (options || {}))
const ctx = getContext2dOrThrow(canvas)
const { lineWidth, color = 'blue' } = drawOptions
const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks]
faceLandmarksArray.forEach(landmarks => {
if (drawLines && landmarks instanceof FaceLandmarks68) {
ctx.strokeStyle = color
ctx.lineWidth = lineWidth
drawContour(ctx, landmarks.getJawOutline())
drawContour(ctx, landmarks.getLeftEyeBrow())
drawContour(ctx, landmarks.getRightEyeBrow())
drawContour(ctx, landmarks.getNose())
drawContour(ctx, landmarks.getLeftEye(), true)
drawContour(ctx, landmarks.getRightEye(), true)
drawContour(ctx, landmarks.getMouth(), true)
return
}
// else draw points
const ptOffset = lineWidth / 2
ctx.fillStyle = color
landmarks.positions.forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
})
}
\ No newline at end of file
export * from './drawContour'
export * from './drawLandmarks'
export * from './drawFaceExpressions'
export * from './extractFaces'
export * from './extractFaceTensors'
export * from './types'
\ No newline at end of file
export * from './extractFaceTensors'
\ No newline at end of file
import { IPoint, IRect } from 'tfjs-image-recognition-base';
import { WithFaceExpressions } from '../factories/WithFaceExpressions';
export type DrawLandmarksOptions = {
lineWidth?: number
color?: string
drawLines?: boolean
}
export type DrawFaceExpressionsOptions = {
primaryColor?: string
secondaryColor?: string
primaryFontSize?: number
secondaryFontSize?: number
minConfidence?: number
}
export type DrawFaceExpressionsInput = WithFaceExpressions<{
position: IPoint | IRect
}>
\ No newline at end of file
import { getContext2dOrThrow, IPoint } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { drawContour } from './drawContour';
export interface IDrawFaceLandmarksOptions {
drawLines?: boolean
drawPoints?: boolean
lineWidth?: number
pointSize?: number
lineColor?: string
pointColor?: string
}
export class DrawFaceLandmarksOptions {
public drawLines: boolean
public drawPoints: boolean
public lineWidth: number
public pointSize: number
public lineColor: string
public pointColor: string
constructor(options: IDrawFaceLandmarksOptions = {}) {
const { drawLines = true, drawPoints = true, lineWidth, lineColor, pointSize, pointColor } = options
this.drawLines = drawLines
this.drawPoints = drawPoints
this.lineWidth = lineWidth || 1
this.pointSize = pointSize || 2
this.lineColor = lineColor || 'rgba(0, 255, 255, 1)'
this.pointColor = pointColor || 'rgba(255, 0, 255, 1)'
}
}
export class DrawFaceLandmarks {
public faceLandmarks: FaceLandmarks
public options: DrawFaceLandmarksOptions
constructor(
faceLandmarks: FaceLandmarks,
options: IDrawFaceLandmarksOptions = {}
) {
this.faceLandmarks = faceLandmarks
this.options = new DrawFaceLandmarksOptions(options)
}
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {
const ctx = getContext2dOrThrow(canvasArg)
const { drawLines, drawPoints, lineWidth, lineColor, pointSize, pointColor } = this.options
if (drawLines && this.faceLandmarks instanceof FaceLandmarks68) {
ctx.strokeStyle = lineColor
ctx.lineWidth = lineWidth
drawContour(ctx, this.faceLandmarks.getJawOutline())
drawContour(ctx, this.faceLandmarks.getLeftEyeBrow())
drawContour(ctx, this.faceLandmarks.getRightEyeBrow())
drawContour(ctx, this.faceLandmarks.getNose())
drawContour(ctx, this.faceLandmarks.getLeftEye(), true)
drawContour(ctx, this.faceLandmarks.getRightEye(), true)
drawContour(ctx, this.faceLandmarks.getMouth(), true)
}
if (drawPoints) {
ctx.strokeStyle = pointColor
ctx.fillStyle = pointColor
const drawPoint = (pt: IPoint) => {
ctx.beginPath()
ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI)
ctx.fill()
}
this.faceLandmarks.positions.forEach(drawPoint)
}
}
}
export type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>
export function drawFaceLandmarks(
canvasArg: string | HTMLCanvasElement,
faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>
) {
const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks]
faceLandmarksArray.forEach(f => {
const landmarks = f instanceof FaceLandmarks
? f
: (isWithFaceLandmarks(f) ? f.landmarks : undefined)
if (!landmarks) {
throw new Error('drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>> or array thereof')
}
new DrawFaceLandmarks(landmarks).draw(canvasArg)
})
}
\ No newline at end of file
import { Box, draw, IBoundingBox, IRect, round } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
export type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>
export function drawDetections(
canvasArg: string | HTMLCanvasElement,
detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>
) {
const detectionsArray = Array.isArray(detections) ? detections : [detections]
detectionsArray.forEach(det => {
const score = det instanceof FaceDetection
? det.score
: (isWithFaceDetection(det) ? det.detection.score : undefined)
const box = det instanceof FaceDetection
? det.box
: (isWithFaceDetection(det) ? det.detection.box : new Box(det))
const label = score ? `${round(score)}` : undefined
new draw.DrawBox(box, { label }).draw(canvasArg)
})
}
\ No newline at end of file
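A usage sketch of the rewritten drawDetections; the canvas and box values are illustrative:

// a plain rect is drawn without a score label
faceapi.draw.drawDetections(canvas, { x: 50, y: 60, width: 120, height: 120 })
// FaceDetection results or WithFaceDetection objects are labeled with their rounded score
faceapi.draw.drawDetections(canvas, resizedResults)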
import { draw, IPoint, Point, round } from 'tfjs-image-recognition-base';
import { FaceExpressions } from '../faceExpressionNet';
import { isWithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
export type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>
export function drawFaceExpressions(
canvasArg: string | HTMLCanvasElement,
faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>,
minConfidence = 0.1,
textFieldAnchor?: IPoint
) {
const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions]
faceExpressionsArray.forEach(e => {
const expr = e instanceof FaceExpressions
? e
: (isWithFaceExpressions(e) ? e.expressions : undefined)
if (!expr) {
throw new Error('drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof')
}
const sorted = expr.asSortedArray()
const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)
const anchor = isWithFaceDetection(e)
? e.detection.box.bottomLeft
: (textFieldAnchor || new Point(0, 0))
const drawTextField = new draw.DrawTextField(
resultsToDisplay.map(expr => `${expr.expression} (${round(expr.probability)})`),
anchor
)
drawTextField.draw(canvasArg)
})
}
\ No newline at end of file
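A usage sketch; the threshold and anchor values are illustrative:

// results that carry a detection anchor the text field at detection.box.bottomLeft
faceapi.draw.drawFaceExpressions(canvas, resizedResults, 0.05)
// a bare FaceExpressions falls back to the explicit textFieldAnchor
faceapi.draw.drawFaceExpressions(canvas, expressions, 0.1, { x: 10, y: 10 })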
export * from './drawContour'
export * from './drawDetections'
export * from './drawFaceExpressions'
export * from './DrawFaceLandmarks'
\ No newline at end of file
......@@ -4,29 +4,10 @@ import { NetInput, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
import { FaceExpression, faceExpressionLabels, FaceExpressionPrediction } from './types';
import { FaceExpressions } from './FaceExpressions';
export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
public static getFaceExpressionLabel(faceExpression: string) {
const label = faceExpressionLabels[faceExpression]
if (typeof label !== 'number') {
throw new Error(`getFaceExpressionLabel - no label for faceExpression: ${faceExpression}`)
}
return label
}
public static decodeProbabilites(probabilities: number[] | Float32Array): FaceExpressionPrediction[] {
if (probabilities.length !== 7) {
throw new Error(`decodeProbabilites - expected probabilities.length to be 7, have: ${probabilities.length}`)
}
return (Object.keys(faceExpressionLabels) as FaceExpression[])
.map(expression => ({ expression, probability: probabilities[faceExpressionLabels[expression]] }))
}
constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {
super('FaceExpressionNet', faceFeatureExtractor)
}
......@@ -50,7 +31,7 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
out.dispose()
const predictionsByBatch = probabilitesByBatch
.map(propablities => FaceExpressionNet.decodeProbabilites(propablities as Float32Array))
.map(probabilites => new FaceExpressions(probabilites as Float32Array))
return netInput.isBatchInput
? predictionsByBatch
......
export const FACE_EXPRESSION_LABELS = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised']
export class FaceExpressions {
public neutral: number
public happy: number
public sad: number
public angry: number
public fearful: number
public disgusted: number
public surprised: number
constructor(probabilities: number[] | Float32Array) {
if (probabilities.length !== 7) {
throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`)
}
FACE_EXPRESSION_LABELS.forEach((expression, idx) => {
this[expression] = probabilities[idx]
})
}
asSortedArray() {
return FACE_EXPRESSION_LABELS
.map(expression => ({ expression, probability: this[expression] as number }))
.sort((e0, e1) => e1.probability - e0.probability)
}
}
\ No newline at end of file
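A short sketch of how the new FaceExpressions result object reads, with illustrative probability values:

// Hypothetical sketch: probabilities are mapped to named properties in label order
// (neutral, happy, sad, angry, fearful, disgusted, surprised).
const expressions = new FaceExpressions([0.1, 0.7, 0.05, 0.05, 0.04, 0.03, 0.03])
console.log(expressions.happy)                 // 0.7
// asSortedArray() recovers the previous { expression, probability } shape, descending:
const [best] = expressions.asSortedArray()
console.log(best.expression, best.probability) // 'happy' 0.7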
export * from './FaceExpressionNet';
export * from './types';
\ No newline at end of file
export * from './FaceExpressions';
\ No newline at end of file
export const faceExpressionLabels = {
neutral: 0,
happy: 1,
sad: 2,
angry: 3,
fearful: 4,
disgusted: 5,
surprised: 6
}
export type FaceExpression = 'neutral' | 'happy' | 'sad' | 'angry' | 'fearful' | 'disgusted' | 'surprised'
export type FaceExpressionPrediction = {
expression: FaceExpression,
probability: number
}
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { depthwiseSeparableConv } from './depthwiseSeparableConv';
import { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function denseBlock3(
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractSeparableConvParams(channelsIn: number, channelsOut: number, mappedPrefix: string): TfjsImageRecognitionBase.SeparableConvParams {
const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1])
const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut])
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/depthwise_filter` },
{ paramPath: `${mappedPrefix}/pointwise_filter` },
{ paramPath: `${mappedPrefix}/bias` }
)
return new TfjsImageRecognitionBase.SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
const extractConvParams = TfjsImageRecognitionBase.extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = TfjsImageRecognitionBase.extractSeparableConvParamsFactory(extractWeights, paramMappings)
function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { loadConvParamsFactory } from '../common/loadConvParamsFactory';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
function extractConvParams(prefix: string): TfjsImageRecognitionBase.ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { filters, bias }
}
function extractSeparableConvParams(prefix: string): TfjsImageRecognitionBase.SeparableConvParams {
const depthwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/depthwise_filter`, 4)
const pointwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/pointwise_filter`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return new TfjsImageRecognitionBase.SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
const extractConvParams = loadConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = TfjsImageRecognitionBase.loadSeparableConvParamsFactory(extractWeightEntry)
function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
const conv0 = isFirstLayer
......
export type WithAge<TSource> = TSource & {
age: number
}
export function isWithAge(obj: any): obj is WithAge<{}> {
return typeof obj['age'] === 'number'
}
export function extendWithAge<
TSource
> (
sourceObj: TSource,
age: number
): WithAge<TSource> {
const extension = { age }
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
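The WithAge factory follows the same pattern as the other extendWith* helpers; a small sketch with an illustrative source object:

// Hypothetical sketch: extendWithAge returns a copy of the source object with an
// `age` field; the type guard isWithAge narrows on that field.
const base = { label: 'face 0' }
const aged = extendWithAge(base, 31)
console.log(aged.label, aged.age) // 'face 0' 31
console.log(isWithAge(aged))      // true
console.log(isWithAge(base))      // false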
......@@ -4,6 +4,10 @@ export type WithFaceDetection<TSource> = TSource & {
detection: FaceDetection
}
export function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> {
return obj['detection'] instanceof FaceDetection
}
export function extendWithFaceDetection<
TSource
> (
......
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
export type WithFaceExpressions<TSource> = TSource & {
expressions: FaceExpressionPrediction[]
expressions: FaceExpressions
}
export function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}> {
return obj['expressions'] instanceof FaceExpressions
}
export function extendWithFaceExpressions<
TSource
> (
sourceObj: TSource,
expressions: FaceExpressionPrediction[]
expressions: FaceExpressions
): WithFaceExpressions<TSource> {
const extension = { expressions }
......
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from './WithFaceDetection';
import { isWithFaceDetection, WithFaceDetection } from './WithFaceDetection';
export type WithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
......@@ -12,6 +12,13 @@ export type WithFaceLandmarks<
alignedRect: FaceDetection
}
export function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks> {
return isWithFaceDetection(obj)
&& obj['landmarks'] instanceof FaceLandmarks
&& obj['unshiftedLandmarks'] instanceof FaceLandmarks
&& obj['alignedRect'] instanceof FaceDetection
}
export function extendWithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
......
import { isValidProbablitiy } from 'tfjs-image-recognition-base';
import { Gender } from '../ageGenderNet/types';
export type WithGender<TSource> = TSource & {
gender: Gender
genderProbability: number
}
export function isWithGender(obj: any): obj is WithGender<{}> {
return (obj['gender'] === Gender.MALE || obj['gender'] === Gender.FEMALE)
&& isValidProbablitiy(obj['genderProbability'])
}
export function extendWithGender<
TSource
> (
sourceObj: TSource,
gender: Gender,
genderProbability: number
): WithGender<TSource> {
const extension = { gender, genderProbability }
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { extractFaces, extractFaceTensors } from '../dom';
import { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';
import { nets } from './nets';
import {
PredictAllAgeAndGenderWithFaceAlignmentTask,
PredictSingleAgeAndGenderWithFaceAlignmentTask,
} from './PredictAgeAndGenderTask';
import {
PredictAllFaceExpressionsWithFaceAlignmentTask,
PredictSingleFaceExpressionsWithFaceAlignmentTask,
} from './PredictFaceExpressionsTask';
export class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
......@@ -25,19 +32,25 @@ export class ComputeAllFaceDescriptorsTask<
const parentResults = await this.parentTask
const alignedRects = parentResults.map(({ alignedRect }) => alignedRect)
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, alignedRects)
: await extractFaces(this.input, alignedRects)
const descriptors = await extractAllFacesAndComputeResults<TSource, Float32Array[]>(
parentResults,
this.input,
faces => Promise.all(faces.map(face =>
nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise<Float32Array>
)),
null,
parentResult => parentResult.landmarks.align(null, { useDlibAlignment: true })
)
const results = await Promise.all(parentResults.map(async (parentResult, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[i]) as Float32Array
return extendWithFaceDescriptor<TSource>(parentResult, descriptor)
}))
return descriptors.map((descriptor, i) => extendWithFaceDescriptor<TSource>(parentResults[i], descriptor))
}
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
withFaceExpressions() {
return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
return results
withAgeAndGender() {
return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
}
......@@ -51,15 +64,22 @@ export class ComputeSingleFaceDescriptorTask<
if (!parentResult) {
return
}
const descriptor = await extractSingleFaceAndComputeResult<TSource, Float32Array>(
parentResult,
this.input,
face => nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise<Float32Array>,
null,
parentResult => parentResult.landmarks.align(null, { useDlibAlignment: true })
)
const { alignedRect } = parentResult
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [alignedRect])
: await extractFaces(this.input, [alignedRect])
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[0]) as Float32Array
return extendWithFaceDescriptor(parentResult, descriptor)
}
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
withFaceExpressions() {
return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
return extendWithFaceDescriptor(parentResult, descriptor)
withAgeAndGender() {
return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
}
\ No newline at end of file
......@@ -10,7 +10,14 @@ import { extendWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFac
import { ComposableTask } from './ComposableTask';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { nets } from './nets';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
import {
PredictAllAgeAndGenderWithFaceAlignmentTask,
PredictSingleAgeAndGenderWithFaceAlignmentTask,
} from './PredictAgeAndGenderTask';
import {
PredictAllFaceExpressionsWithFaceAlignmentTask,
PredictSingleFaceExpressionsWithFaceAlignmentTask,
} from './PredictFaceExpressionsTask';
export class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
......@@ -52,8 +59,16 @@ export class DetectAllFaceLandmarksTask<
)
}
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> {
return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input)
withFaceExpressions() {
return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
withAgeAndGender() {
return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
withFaceDescriptors() {
return new ComputeAllFaceDescriptorsTask(this, this.input)
}
}
......@@ -80,7 +95,15 @@ export class DetectSingleFaceLandmarksTask<
return extendWithFaceLandmarks<TSource>(parentResult, landmarks)
}
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> {
return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input)
withFaceExpressions() {
return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
withAgeAndGender() {
return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
withFaceDescriptor() {
return new ComputeSingleFaceDescriptorTask(this, this.input)
}
}
\ No newline at end of file
......@@ -8,7 +8,8 @@ import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOpt
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
import { PredictAllAgeAndGenderTask, PredictSingleAgeAndGenderTask } from './PredictAgeAndGenderTask';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionsTask } from './PredictFaceExpressionsTask';
import { FaceDetectionOptions } from './types';
export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
......@@ -57,16 +58,23 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectAllFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectAllFaceLandmarksTask<WithFaceDetection<{}>>(
withFaceLandmarks(useTinyLandmarkNet: boolean = false) {
return new DetectAllFaceLandmarksTask(
this.runAndExtendWithFaceDetections(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceDetection<{}>> {
return new PredictAllFaceExpressionsTask<WithFaceDetection<{}>>(
withFaceExpressions() {
return new PredictAllFaceExpressionsTask(
this.runAndExtendWithFaceDetections(),
this.input
)
}
withAgeAndGender() {
return new PredictAllAgeAndGenderTask(
this.runAndExtendWithFaceDetections(),
this.input
)
......@@ -93,19 +101,25 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectSingleFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectSingleFaceLandmarksTask<WithFaceDetection<{}>>(
withFaceLandmarks(useTinyLandmarkNet: boolean = false) {
return new DetectSingleFaceLandmarksTask(
this.runAndExtendWithFaceDetection(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpressions(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>(
withFaceExpressions() {
return new PredictSingleFaceExpressionsTask(
this.runAndExtendWithFaceDetection(),
this.input
)
}
withAgeAndGender() {
return new PredictSingleAgeAndGenderTask(
this.runAndExtendWithFaceDetection(),
this.input
)
}
}
\ No newline at end of file
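With the withAgeAndGender entry points above, age and gender can be chained directly after detection, without landmarks; a hedged sketch, assuming the relevant models are loaded and `input` is any TNetInput:

// Hypothetical sketch: chaining expressions and age/gender off a single detection.
// In this path faces are extracted from the raw detection boxes (no alignment).
const result = await faceapi
  .detectSingleFace(input)
  .withFaceExpressions()
  .withAgeAndGender()
if (result) {
  const { age, gender, genderProbability, expressions } = result
  console.log(expressions.asSortedArray()[0].expression, gender, genderProbability, age)
}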
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { AgeAndGenderPrediction } from '../ageGenderNet/types';
import { extendWithAge, WithAge } from '../factories/WithAge';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { extendWithGender, WithGender } from '../factories/WithGender';
import { ComposableTask } from './ComposableTask';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';
import { nets } from './nets';
import {
PredictAllFaceExpressionsTask,
PredictAllFaceExpressionsWithFaceAlignmentTask,
PredictSingleFaceExpressionsTask,
PredictSingleFaceExpressionsWithFaceAlignmentTask,
} from './PredictFaceExpressionsTask';
export class PredictAgeAndGenderTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput,
protected extractedFaces?: Array<HTMLCanvasElement | tf.Tensor3D>
) {
super()
}
}
export class PredictAllAgeAndGenderTask<
TSource extends WithFaceDetection<{}>
> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>>[], TSource[]> {
public async run(): Promise<WithAge<WithGender<TSource>>[]> {
const parentResults = await this.parentTask
const ageAndGenderByFace = await extractAllFacesAndComputeResults<TSource, AgeAndGenderPrediction[]>(
parentResults,
this.input,
async faces => await Promise.all(faces.map(
face => nets.ageGenderNet.predictAgeAndGender(face) as Promise<AgeAndGenderPrediction>
)),
this.extractedFaces
)
return parentResults.map((parentResult, i) => {
const { age, gender, genderProbability } = ageAndGenderByFace[i]
return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age)
})
}
withFaceExpressions() {
return new PredictAllFaceExpressionsTask(this, this.input)
}
}
export class PredictSingleAgeAndGenderTask<
TSource extends WithFaceDetection<{}>
> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>> | undefined, TSource | undefined> {
public async run(): Promise<WithAge<WithGender<TSource>> | undefined> {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { age, gender, genderProbability } = await extractSingleFaceAndComputeResult<TSource, AgeAndGenderPrediction>(
parentResult,
this.input,
face => nets.ageGenderNet.predictAgeAndGender(face) as Promise<AgeAndGenderPrediction>,
this.extractedFaces
)
return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age)
}
withFaceExpressions() {
return new PredictSingleFaceExpressionsTask(this, this.input)
}
}
export class PredictAllAgeAndGenderWithFaceAlignmentTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends PredictAllAgeAndGenderTask<TSource> {
withFaceExpressions() {
return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
withFaceDescriptors() {
return new ComputeAllFaceDescriptorsTask(this, this.input)
}
}
export class PredictSingleAgeAndGenderWithFaceAlignmentTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends PredictSingleAgeAndGenderTask<TSource> {
withFaceExpressions() {
return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input)
}
withFaceDescriptor() {
return new ComputeSingleFaceDescriptorTask(this, this.input)
}
}
\ No newline at end of file
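When landmarks are detected first, the *WithFaceAlignmentTask variants above run on aligned face crops and keep descriptor computation available in the chain; a hedged sketch under the same assumptions:

// Hypothetical sketch: with face alignment, age/gender is predicted from aligned
// crops and the chain can still terminate in descriptor computation.
const results = await faceapi
  .detectAllFaces(input)
  .withFaceLandmarks()
  .withAgeAndGender()
  .withFaceDescriptors()
results.forEach(({ age, gender, genderProbability, descriptor }) => {
  console.log(`${gender} (${genderProbability.toFixed(2)}), age ~${age.toFixed(0)}`,
    descriptor.length) // 128-dimensional face descriptor
})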
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';
import { nets } from './nets';
import {
PredictAllAgeAndGenderTask,
PredictAllAgeAndGenderWithFaceAlignmentTask,
PredictSingleAgeAndGenderTask,
PredictSingleAgeAndGenderWithFaceAlignmentTask,
} from './PredictAgeAndGenderTask';
export class PredictFaceExpressionsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput
protected input: TNetInput,
protected extractedFaces?: Array<HTMLCanvasElement | tf.Tensor3D>
) {
super()
}
......@@ -26,28 +34,26 @@ export class PredictAllFaceExpressionsTask<
const parentResults = await this.parentTask
const detections = parentResults.map(parentResult => parentResult.detection)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, detections)
: await extractFaces(this.input, detections)
const faceExpressionsByFace = await Promise.all(faces.map(
face => nets.faceExpressionNet.predictExpressions(face)
)) as FaceExpressionPrediction[][]
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
const faceExpressionsByFace = await extractAllFacesAndComputeResults<TSource, FaceExpressions[]>(
parentResults,
this.input,
async faces => await Promise.all(faces.map(
face => nets.faceExpressionNet.predictExpressions(face) as Promise<FaceExpressions>
)),
this.extractedFaces
)
return parentResults.map(
(parentResult, i) => extendWithFaceExpressions<TSource>(parentResult, faceExpressionsByFace[i])
)
}
withFaceLandmarks(): DetectAllFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectAllFaceLandmarksTask(this, this.input, false)
withAgeAndGender() {
return new PredictAllAgeAndGenderTask(this, this.input)
}
}
export class PredictSingleFaceExpressionTask<
export class PredictSingleFaceExpressionsTask<
TSource extends WithFaceDetection<{}>
> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource> | undefined, TSource | undefined> {
......@@ -58,19 +64,43 @@ export class PredictSingleFaceExpressionTask<
return
}
const { detection } = parentResult
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
const faceExpressions = await extractSingleFaceAndComputeResult<TSource, FaceExpressions>(
parentResult,
this.input,
face => nets.faceExpressionNet.predictExpressions(face) as Promise<FaceExpressions>,
this.extractedFaces
)
return extendWithFaceExpressions(parentResult, faceExpressions)
}
withAgeAndGender() {
return new PredictSingleAgeAndGenderTask(this, this.input)
}
}
const faceExpressions = await nets.faceExpressionNet.predictExpressions(faces[0]) as FaceExpressionPrediction[]
export class PredictAllFaceExpressionsWithFaceAlignmentTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends PredictAllFaceExpressionsTask<TSource> {
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
withAgeAndGender() {
return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
return extendWithFaceExpressions(parentResult, faceExpressions)
withFaceDescriptors() {
return new ComputeAllFaceDescriptorsTask(this, this.input)
}
}
export class PredictSingleFaceExpressionsWithFaceAlignmentTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends PredictSingleFaceExpressionsTask<TSource> {
withAgeAndGender() {
return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input)
}
withFaceLandmarks(): DetectSingleFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectSingleFaceLandmarksTask(this, this.input, false)
withFaceDescriptor() {
return new ComputeSingleFaceDescriptorTask(this, this.input)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { extractFaces, extractFaceTensors } from '../dom';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
export async function extractAllFacesAndComputeResults<TSource extends WithFaceDetection<{}>, TResult>(
parentResults: TSource[],
input: TNetInput,
computeResults: (faces: Array<HTMLCanvasElement | tf.Tensor3D>) => Promise<TResult>,
extractedFaces?: Array<HTMLCanvasElement | tf.Tensor3D> | null,
getRectForAlignment: (parentResult: WithFaceLandmarks<TSource, any>) => FaceDetection = ({ alignedRect }) => alignedRect
) {
const faceBoxes = parentResults.map(parentResult =>
isWithFaceLandmarks(parentResult)
? getRectForAlignment(parentResult)
: parentResult.detection
)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = extractedFaces || (
input instanceof tf.Tensor
? await extractFaceTensors(input, faceBoxes)
: await extractFaces(input, faceBoxes)
)
const results = await computeResults(faces)
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return results
}
export async function extractSingleFaceAndComputeResult<TSource extends WithFaceDetection<{}>, TResult>(
parentResult: TSource,
input: TNetInput,
computeResult: (face: HTMLCanvasElement | tf.Tensor3D) => Promise<TResult>,
extractedFaces?: Array<HTMLCanvasElement | tf.Tensor3D> | null,
getRectForAlignment?: (parentResult: WithFaceLandmarks<TSource, any>) => FaceDetection
) {
return extractAllFacesAndComputeResults<TSource, TResult>(
[parentResult],
input,
async faces => computeResult(faces[0]),
extractedFaces,
getRectForAlignment
)
}
\ No newline at end of file
import { TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { AgeGenderNet } from '../ageGenderNet/AgeGenderNet';
import { AgeAndGenderPrediction } from '../ageGenderNet/types';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';
......@@ -26,7 +28,8 @@ export const nets = {
faceLandmark68Net: new FaceLandmark68Net(),
faceLandmark68TinyNet: new FaceLandmark68TinyNet(),
faceRecognitionNet: new FaceRecognitionNet(),
faceExpressionNet: new FaceExpressionNet()
faceExpressionNet: new FaceExpressionNet(),
ageGenderNet: new AgeGenderNet()
}
/**
......@@ -107,16 +110,25 @@ export const computeFaceDescriptor = (input: TNetInput): Promise<Float32Array |
/**
* Recognizes the facial expressions of a face and returns the likelihood of
* each facial expression.
* Recognizes the facial expressions from a face image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns An array of facial expressions with corresponding probabilities or array thereof in case of batch input.
* @returns Facial expressions with corresponding probabilities or array thereof in case of batch input.
*/
export const recognizeFaceExpressions = (input: TNetInput): Promise<FaceExpressionPrediction[] | FaceExpressionPrediction[][]> =>
export const recognizeFaceExpressions = (input: TNetInput): Promise<FaceExpressions | FaceExpressions[]> =>
nets.faceExpressionNet.predictExpressions(input)
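A hedged usage sketch for the updated global API, assuming the expression model is served under /models and `faceCanvas` holds a cropped face image (both illustrative):

// Hypothetical sketch: recognizeFaceExpressions now resolves to a FaceExpressions
// instance (or an array of them for batch input).
await faceapi.loadFaceExpressionModel('/models')
const expressions = await faceapi.recognizeFaceExpressions(faceCanvas) as faceapi.FaceExpressions
console.log(expressions.asSortedArray())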
/**
* Predicts age and gender from a face image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Predictions with age, gender and gender probability or array thereof in case of batch input.
*/
export const predictAgeAndGender = (input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]> =>
nets.ageGenderNet.predictAgeAndGender(input)
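And correspondingly for the new age/gender entry point, under the same illustrative assumptions:

// Hypothetical sketch: predictAgeAndGender resolves to { age, gender, genderProbability }
// (or an array thereof for batch input).
await faceapi.loadAgeGenderModel('/models')
const { age, gender, genderProbability } =
  await faceapi.predictAgeAndGender(faceCanvas) as faceapi.AgeAndGenderPrediction
console.log(`${gender} (${Math.round(genderProbability * 100)}%), age ~${Math.round(age)}`)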
export const loadSsdMobilenetv1Model = (url: string) => nets.ssdMobilenetv1.load(url)
export const loadTinyFaceDetectorModel = (url: string) => nets.tinyFaceDetector.load(url)
export const loadMtcnnModel = (url: string) => nets.mtcnn.load(url)
......@@ -125,6 +137,7 @@ export const loadFaceLandmarkModel = (url: string) => nets.faceLandmark68Net.loa
export const loadFaceLandmarkTinyModel = (url: string) => nets.faceLandmark68TinyNet.load(url)
export const loadFaceRecognitionModel = (url: string) => nets.faceRecognitionNet.load(url)
export const loadFaceExpressionModel = (url: string) => nets.faceExpressionNet.load(url)
export const loadAgeGenderModel = (url: string) => nets.ageGenderNet.load(url)
// backward compatibility
export const loadFaceDetectionModel = loadSsdMobilenetv1Model
......
import * as tf from '@tensorflow/tfjs-core';
import { draw as drawBase } from 'tfjs-image-recognition-base';
import * as drawExtended from './draw';
export {
tf
......@@ -6,6 +9,10 @@ export {
export * from 'tfjs-image-recognition-base';
export * from './ageGenderNet/index';
const draw = {...drawBase, ...drawExtended }
export { draw }
export * from './classes/index';
export * from './dom/index'
export * from './faceExpressionNet/index';
......
import { BoundingBox, IPoint } from 'tfjs-image-recognition-base';
export function minBbox(pts: IPoint[]): BoundingBox {
const xs = pts.map(pt => pt.x)
const ys = pts.map(pt => pt.y)
const minX = xs.reduce((min, x) => x < min ? x : min, Infinity)
const minY = ys.reduce((min, y) => y < min ? y : min, Infinity)
const maxX = xs.reduce((max, x) => max < x ? x : max, 0)
const maxY = ys.reduce((max, y) => max < y ? y : max, 0)
return new BoundingBox(minX, minY, maxX, maxY)
}
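A worked example of minBbox with three illustrative points:

// Hypothetical sketch: the tightest axis-aligned box around a point set.
const box = minBbox([{ x: 10, y: 20 }, { x: 4, y: 35 }, { x: 18, y: 12 }])
// left = 4, top = 12, right = 18, bottom = 35 -> width = 14, height = 23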
import { IDimensions } from 'tfjs-image-recognition-base';
import { Dimensions, IDimensions } from 'tfjs-image-recognition-base';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks } from './classes/FaceLandmarks';
import { extendWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks } from './factories/WithFaceLandmarks';
import { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks, isWithFaceLandmarks } from './factories/WithFaceLandmarks';
export function resizeResults<T>(results: T, { width, height }: IDimensions): T {
export function resizeResults<T>(results: T, dimensions: IDimensions): T {
const { width, height } = new Dimensions(dimensions.width, dimensions.height)
if (width <= 0 || height <= 0) {
throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`)
}
if (Array.isArray(results)) {
return results.map(obj => resizeResults(obj, { width, height })) as any as T
}
const hasLandmarks = results['unshiftedLandmarks'] && results['unshiftedLandmarks'] instanceof FaceLandmarks
const hasDetection = results['detection'] && results['detection'] instanceof FaceDetection
if (hasLandmarks) {
const resizedDetection = results['detection'].forSize(width, height)
const resizedLandmarks = results['unshiftedLandmarks'].forSize(resizedDetection.box.width, resizedDetection.box.height)
if (isWithFaceLandmarks(results)) {
const resizedDetection = results.detection.forSize(width, height)
const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height)
return extendWithFaceLandmarks(extendWithFaceDetection(results as any, resizedDetection), resizedLandmarks)
return extendWithFaceLandmarks(extendWithFaceDetection(results, resizedDetection), resizedLandmarks)
}
if (hasDetection) {
return extendWithFaceDetection(results as any, results['detection'].forSize(width, height))
if (isWithFaceDetection(results)) {
return extendWithFaceDetection(results, results.detection.forSize(width, height))
}
if (results instanceof FaceLandmarks || results instanceof FaceDetection) {
......
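A short usage sketch of resizeResults, with illustrative display dimensions and `results` assumed to come from one of the detection tasks:

// Hypothetical sketch: scale results to the size at which the media is displayed,
// e.g. before drawing onto an overlay canvas.
const displaySize = { width: 640, height: 480 }
const resized = faceapi.resizeResults(results, displaySize)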
import * as tf from '@tensorflow/tfjs-core';
import {
NetInput,
NeuralNetwork,
normalize,
range,
TfjsImageRecognitionBase,
TNetInput,
toNetInput,
} from 'tfjs-image-recognition-base';
import { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';
import { bgrToRgbTensor } from '../mtcnn/bgrToRgbTensor';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function conv(x: tf.Tensor4D, params: TfjsImageRecognitionBase.ConvParams, stride: [number, number]): tf.Tensor4D {
return tf.add(tf.conv2d(x, params.filters, stride, 'same'), params.bias)
}
function reductionBlock(x: tf.Tensor4D, params: ReductionBlockParams, isActivateInput: boolean = true): tf.Tensor4D {
let out = isActivateInput ? tf.relu(x) : x
out = depthwiseSeparableConv(out, params.separable_conv0, [1, 1])
out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1])
out = tf.maxPool(out, [3, 3], [2, 2], 'same')
out = tf.add(out, conv(x, params.expansion_conv, [2, 2]))
return out
}
function mainBlock(x: tf.Tensor4D, params: MainBlockParams): tf.Tensor4D {
let out = depthwiseSeparableConv(tf.relu(x), params.separable_conv0, [1, 1])
out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1])
out = depthwiseSeparableConv(tf.relu(out), params.separable_conv2, [1, 1])
out = tf.add(out, x)
return out
}
export class TinyXception extends NeuralNetwork<TinyXceptionParams> {
private _numMainBlocks: number
constructor(numMainBlocks: number) {
super('TinyXception')
this._numMainBlocks = numMainBlocks
}
public forwardInput(input: NetInput): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('TinyXception - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(256)) as tf.Tensor4D
let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2]))
out = reductionBlock(out, params.entry_flow.reduction_block_0, false)
out = reductionBlock(out, params.entry_flow.reduction_block_1)
range(this._numMainBlocks, 0, 1).forEach((idx) => {
out = mainBlock(out, params.middle_flow[`main_block_${idx}`])
})
out = reductionBlock(out, params.exit_flow.reduction_block)
out = tf.relu(depthwiseSeparableConv(out, params.exit_flow.separable_conv, [1, 1]))
return out
})
}
public async forward(input: TNetInput): Promise<tf.Tensor4D> {
return this.forwardInput(await toNetInput(input))
}
protected getDefaultModelName(): string {
return 'tiny_xception_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap, this._numMainBlocks)
}
protected extractParams(weights: Float32Array) {
return extractParams(weights, this._numMainBlocks)
}
}
\ No newline at end of file
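For orientation, a hedged sketch of running the TinyXception backbone standalone; the block count, model path, and input are illustrative (AgeGenderNet constructs and loads it internally):

// Hypothetical sketch: forward pass through the backbone only.
const xception = new TinyXception(2)     // number of middle-flow main blocks (illustrative)
await xception.load('/models')           // resolves the tiny_xception_model weights
const features = await xception.forward(inputImage) // tf.Tensor4D feature map
features.dispose()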
import { range, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
const extractConvParams = TfjsImageRecognitionBase.extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = TfjsImageRecognitionBase.extractSeparableConvParamsFactory(extractWeights, paramMappings)
function extractReductionBlockParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ReductionBlockParams {
const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`)
const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`)
const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`)
return { separable_conv0, separable_conv1, expansion_conv }
}
function extractMainBlockParams(channels: number, mappedPrefix: string): MainBlockParams {
const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`)
const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`)
const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`)
return { separable_conv0, separable_conv1, separable_conv2 }
}
return {
extractConvParams,
extractSeparableConvParams,
extractReductionBlockParams,
extractMainBlockParams
}
}
export function extractParams(weights: Float32Array, numMainBlocks: number): { params: TinyXceptionParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
const {
extractConvParams,
extractSeparableConvParams,
extractReductionBlockParams,
extractMainBlockParams
} = extractorsFactory(extractWeights, paramMappings)
const entry_flow_conv_in = extractConvParams(3, 32, 3, 'entry_flow/conv_in')
const entry_flow_reduction_block_0 = extractReductionBlockParams(32, 64, 'entry_flow/reduction_block_0')
const entry_flow_reduction_block_1 = extractReductionBlockParams(64, 128, 'entry_flow/reduction_block_1')
const entry_flow = {
conv_in: entry_flow_conv_in,
reduction_block_0: entry_flow_reduction_block_0,
reduction_block_1: entry_flow_reduction_block_1
}
const middle_flow = {}
range(numMainBlocks, 0, 1).forEach((idx) => {
middle_flow[`main_block_${idx}`] = extractMainBlockParams(128, `middle_flow/main_block_${idx}`)
})
const exit_flow_reduction_block = extractReductionBlockParams(128, 256, 'exit_flow/reduction_block')
const exit_flow_separable_conv = extractSeparableConvParams(256, 512, 'exit_flow/separable_conv')
const exit_flow = {
reduction_block: exit_flow_reduction_block,
separable_conv: exit_flow_separable_conv
}
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { entry_flow, middle_flow, exit_flow }
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase, range } from 'tfjs-image-recognition-base';
import { loadConvParamsFactory } from '../common/loadConvParamsFactory';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractConvParams = loadConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = TfjsImageRecognitionBase.loadSeparableConvParamsFactory(extractWeightEntry)
function extractReductionBlockParams(mappedPrefix: string): ReductionBlockParams {
const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`)
const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`)
const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`)
return { separable_conv0, separable_conv1, expansion_conv }
}
function extractMainBlockParams(mappedPrefix: string): MainBlockParams {
const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`)
const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`)
const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`)
return { separable_conv0, separable_conv1, separable_conv2 }
}
return {
extractConvParams,
extractSeparableConvParams,
extractReductionBlockParams,
extractMainBlockParams
}
}
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap,
numMainBlocks: number
): { params: TinyXceptionParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const {
extractConvParams,
extractSeparableConvParams,
extractReductionBlockParams,
extractMainBlockParams
} = loadParamsFactory(weightMap, paramMappings)
const entry_flow_conv_in = extractConvParams('entry_flow/conv_in')
const entry_flow_reduction_block_0 = extractReductionBlockParams('entry_flow/reduction_block_0')
const entry_flow_reduction_block_1 = extractReductionBlockParams('entry_flow/reduction_block_1')
const entry_flow = {
conv_in: entry_flow_conv_in,
reduction_block_0: entry_flow_reduction_block_0,
reduction_block_1: entry_flow_reduction_block_1
}
const middle_flow = {}
range(numMainBlocks, 0, 1).forEach((idx) => {
middle_flow[`main_block_${idx}`] = extractMainBlockParams(`middle_flow/main_block_${idx}`)
})
const exit_flow_reduction_block = extractReductionBlockParams('exit_flow/reduction_block')
const exit_flow_separable_conv = extractSeparableConvParams('exit_flow/separable_conv')
const exit_flow = {
reduction_block: exit_flow_reduction_block,
separable_conv: exit_flow_separable_conv
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
return { params: { entry_flow, middle_flow, exit_flow }, paramMappings }
}
\ No newline at end of file
export * from './TinyXception';
\ No newline at end of file
import { TfjsImageRecognitionBase } from "tfjs-image-recognition-base";
export type ReductionBlockParams = {
separable_conv0: TfjsImageRecognitionBase.SeparableConvParams
separable_conv1: TfjsImageRecognitionBase.SeparableConvParams
expansion_conv: TfjsImageRecognitionBase.ConvParams
}
export type MainBlockParams = {
separable_conv0: TfjsImageRecognitionBase.SeparableConvParams
separable_conv1: TfjsImageRecognitionBase.SeparableConvParams
separable_conv2: TfjsImageRecognitionBase.SeparableConvParams
}
export type TinyXceptionParams = {
entry_flow: {
conv_in: TfjsImageRecognitionBase.ConvParams
reduction_block_0: ReductionBlockParams
reduction_block_1: ReductionBlockParams
}
middle_flow: any,
exit_flow: {
reduction_block: ReductionBlockParams
separable_conv: TfjsImageRecognitionBase.SeparableConvParams
}
}
\ No newline at end of file
import { IRect } from '../../../src';
import { sortBoxes } from '../../utils';
import { IRect } from '../src';
import { sortBoxes } from './utils';
export const expectedTinyFaceDetectorBoxes: IRect[] = sortBoxes([
{ x: 29, y: 264, width: 139, height: 137 },
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { AgeAndGenderPrediction } from '../../../src/ageGenderNet/types';
import { loadImage } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
function expectResultsAngry(result: AgeAndGenderPrediction) {
expect(result.age).toBeGreaterThanOrEqual(36)
expect(result.age).toBeLessThanOrEqual(42)
expect(result.gender).toEqual('male')
expect(result.genderProbability).toBeGreaterThanOrEqual(0.9)
}
function expectResultsSurprised(result: AgeAndGenderPrediction) {
expect(result.age).toBeGreaterThanOrEqual(24)
expect(result.age).toBeLessThanOrEqual(28)
expect(result.gender).toEqual('female')
expect(result.genderProbability).toBeGreaterThanOrEqual(0.8)
}
describeWithBackend('ageGenderNet', () => {
let imgElAngry: HTMLImageElement
let imgElSurprised: HTMLImageElement
beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
})
describeWithNets('quantized weights', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => {
it('recognizes age and gender', async () => {
const result = await ageGenderNet.predictAgeAndGender(imgElAngry) as AgeAndGenderPrediction
expectResultsAngry(result)
})
})
describeWithNets('batch inputs', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => {
it('recognizes age and gender for batch of image elements', async () => {
const inputs = [imgElAngry, imgElSurprised]
const results = await ageGenderNet.predictAgeAndGender(inputs) as AgeAndGenderPrediction[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expectResultsAngry(resultAngry)
expectResultsSurprised(resultSurprised)
})
it('computes age and gender for batch of tf.Tensor3D', async () => {
const inputs = [imgElAngry, imgElSurprised].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
const results = await ageGenderNet.predictAgeAndGender(inputs) as AgeAndGenderPrediction[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expectResultsAngry(resultAngry)
expectResultsSurprised(resultSurprised)
})
it('computes age and gender for batch of mixed inputs', async () => {
const inputs = [imgElAngry, tf.browser.fromPixels(createCanvasFromMedia(imgElSurprised))]
const results = await ageGenderNet.predictAgeAndGender(inputs) as AgeAndGenderPrediction[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expectResultsAngry(resultAngry)
expectResultsSurprised(resultSurprised)
})
})
describeWithNets('no memory leaks', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => {
describe('forwardInput', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry])
const { age, gender } = await ageGenderNet.forwardInput(netInput)
age.dispose()
gender.dispose()
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry, imgElAngry])
const { age, gender } = await ageGenderNet.forwardInput(netInput)
age.dispose()
gender.dispose()
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
const { age, gender } = await ageGenderNet.forwardInput(await toNetInput(tensor))
age.dispose()
gender.dispose()
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
const { age, gender } = await ageGenderNet.forwardInput(await toNetInput(tensors))
age.dispose()
gender.dispose()
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
const { age, gender } = await ageGenderNet.forwardInput(await toNetInput(tensor))
age.dispose()
gender.dispose()
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
const { age, gender } = await ageGenderNet.forwardInput(await toNetInput(tensors))
age.dispose()
gender.dispose()
})
tensors.forEach(t => t.dispose())
})
})
describe('predictAgeAndGender', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender(imgElAngry)
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender([imgElAngry, imgElAngry, imgElAngry])
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender(tensor)
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender(tensors)
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender(tensor)
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
await ageGenderNet.predictAgeAndGender(tensors)
})
tensors.forEach(t => t.dispose())
})
})
})
})
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { loadImage } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
import { FaceExpressions } from '../../../src/faceExpressionNet/FaceExpressions';
describeWithBackend('faceExpressionNet', () => {
......@@ -18,15 +18,9 @@ describeWithBackend('faceExpressionNet', () => {
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions', async () => {
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressionPrediction[]
expect(Array.isArray(result)).toBe(true)
expect(result.length).toEqual(7)
const angry = result.find(res => res.expression === 'angry') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressions
expect(result instanceof FaceExpressions).toBe(true)
expect(result.angry).toBeGreaterThan(0.95)
})
})
......@@ -36,70 +30,43 @@ describeWithBackend('faceExpressionNet', () => {
it('recognizes facial expressions for batch of image elements', async () => {
const inputs = [imgElAngry, imgElSurprised]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
it('computes face expressions for batch of tf.Tensor3D', async () => {
const inputs = [imgElAngry, imgElSurprised].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of mixed inputs', async () => {
it('computes face expressions for batch of mixed inputs', async () => {
const inputs = [imgElAngry, tf.browser.fromPixels(createCanvasFromMedia(imgElSurprised))]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressions[]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
expect(resultAngry instanceof FaceExpressions).toBe(true)
expect(resultSurprised instanceof FaceExpressions).toBe(true)
expect(resultAngry.angry).toBeGreaterThan(0.95)
expect(resultSurprised.surprised).toBeGreaterThan(0.95)
})
})
......
import { TinyFaceDetectorOptions } from '../../../src';
export const withNetArgs = {
withAllFacesTinyFaceDetector: true,
withFaceExpressionNet: { quantized: true },
withAgeGenderNet: { quantized: true }
}
export const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
export const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
export const faceDetectorOptions = new TinyFaceDetectorOptions({
inputSize: 416
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import {
assembleExpectedFullFaceDescriptions,
describeWithBackend,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
sortByFaceDetection,
} from '../../utils';
import { deltas, expectedScores, faceDetectorOptions, withNetArgs } from './consts';
function expectFaceExpressions(results: WithFaceExpressions<WithFaceDetection<{}>>[]) {
sortByFaceDetection(results).forEach((result, i) => {
const { happy, neutral } = result.expressions
const happyProb = i === 4 ? 0 : 0.95
const neutralProb = i === 4 ? 0.4 : 0
expect(happy).toBeGreaterThanOrEqual(happyProb)
expect(neutral).toBeGreaterThanOrEqual(neutralProb)
})
}
const ages = [34, 27, 41, 26, 31, 37]
const agesUnaligned = [33, 26, 37, 30, 36, 22]
const genders = ['female', 'male', 'male', 'female', 'male', 'female']
function expectAgesAndGender(results: WithAge<WithGender<WithFaceDetection<{}>>>[], aligned = true) {
sortByFaceDetection(results).forEach((result, i) => {
const { age, gender, genderProbability } = result
const expectedAge = aligned ? ages[i] : agesUnaligned[i]
expect(Math.abs(age - expectedAge)).toBeLessThanOrEqual(6)
expect(gender).toEqual(genders[i])
expect(genderProbability).toBeGreaterThanOrEqual(i === 0 ? 0.65 : 0.9)
})
}
describeWithBackend('globalApi', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
describeWithNets('detectAllFaces', withNetArgs, () => {
describe('without face alignment', () => {
it('detectAllFaces.withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
})
it('detectAllFaces.withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withAgeAndGender()
expect(results.length).toEqual(6)
expectAgesAndGender(results, false)
})
it('detectAllFaces.withFaceExpressions().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceExpressions()
.withAgeAndGender()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectAgesAndGender(results, false)
})
it('detectAllFaces.withAgeAndGender().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withAgeAndGender()
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectAgesAndGender(results, false)
})
})
describe('with face alignment', () => {
it('detectAllFaces.withFaceLandmarks().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
expect(results.length).toEqual(6)
expectAgesAndGender(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectAgesAndGender(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectAgesAndGender(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectAgesAndGender(results)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectAgesAndGender(results)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describe('no memory leaks', () => {
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptors()
})
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
.withFaceDescriptors()
})
})
})
})
})
\ No newline at end of file
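The chained tasks exercised above correspond one-to-one to the public API. As a minimal usage sketch (assuming the models are served from a hypothetical './models' directory and loaded via the browser-facing loadFromUri loaders):

import * as faceapi from 'face-api.js';

async function describeFaces(input: HTMLImageElement) {
  // load the nets required for the full task chain ('./models' is a placeholder path)
  await faceapi.nets.tinyFaceDetector.loadFromUri('./models')
  await faceapi.nets.faceLandmark68Net.loadFromUri('./models')
  await faceapi.nets.faceExpressionNet.loadFromUri('./models')
  await faceapi.nets.ageGenderNet.loadFromUri('./models')

  // same composition as in the specs: detection -> alignment -> expressions -> age/gender
  const results = await faceapi
    .detectAllFaces(input, new faceapi.TinyFaceDetectorOptions({ inputSize: 416 }))
    .withFaceLandmarks()
    .withFaceExpressions()
    .withAgeAndGender()

  results.forEach(({ age, gender, genderProbability }) => {
    console.log(`${gender} (${genderProbability.toFixed(2)}), age ~${Math.round(age)}`)
  })
}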
import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import {
assembleExpectedFullFaceDescriptions,
describeWithBackend,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
} from '../../utils';
import { deltas, expectedScores, faceDetectorOptions, withNetArgs } from './consts';
function expectFaceExpressions(result: WithFaceExpressions<{}> | undefined) {
expect(!!result).toBeTruthy()
if (result) {
expect(result.expressions.happy).toBeGreaterThanOrEqual(0.95)
}
}
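// detectSingleFace returns only the result with the highest detection score,
// which for this test image is the face at index 2 of the sorted reference data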
function expectAgeAndGender(result: WithAge<WithGender<{}>> | undefined, aligned = true) {
expect(!!result).toBeTruthy()
if (result) {
const { age, gender, genderProbability } = result
const expectedAge = aligned ? 41 : 37
expect(Math.abs(age - expectedAge)).toBeLessThanOrEqual(5)
expect(gender).toEqual('male')
expect(genderProbability).toBeGreaterThanOrEqual(0.9)
}
}
describeWithBackend('globalApi', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
function expectFaceDetectionWithLandmarks(result: faceapi.WithFaceLandmarks<faceapi.WithFaceDetection<{}>> | undefined) {
expect(!!result).toBeTruthy()
if (result) {
expectFaceDetectionsWithLandmarks(
[result],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
}
}
function expectFullFaceDescription(result: faceapi.WithFaceDescriptor<faceapi.WithFaceLandmarks<faceapi.WithFaceDetection<{}>>> | undefined) {
expect(!!result).toBeTruthy()
if (result) {
expectFullFaceDescriptions(
[result],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
}
}
describeWithNets('detectSingleFace', withNetArgs, () => {
describe('without face alignment', () => {
it('detectSingleFace.withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceExpressions()
expectFaceExpressions(result)
})
it('detectSingleFace.withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withAgeAndGender()
expectAgeAndGender(result, false)
})
it('detectSingleFace.withFaceExpressions().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceExpressions()
.withAgeAndGender()
expectFaceExpressions(result)
expectAgeAndGender(result, false)
})
it('detectSingleFace.withAgeAndGender().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withAgeAndGender()
.withFaceExpressions()
expectFaceExpressions(result)
expectAgeAndGender(result, false)
})
})
describe('with face alignment', () => {
it('detectSingleFace.withFaceLandmarks().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
expectFaceExpressions(result)
expectFaceDetectionWithLandmarks(result)
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
expectAgeAndGender(result)
expectFaceDetectionWithLandmarks(result)
})
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptor()
expectFullFaceDescription(result)
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
expectFaceExpressions(result)
expectAgeAndGender(result)
expectFaceDetectionWithLandmarks(result)
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceExpressions()
expectFaceExpressions(result)
expectAgeAndGender(result)
expectFaceDetectionWithLandmarks(result)
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptor()
expectFaceExpressions(result)
expectFullFaceDescription(result)
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceDescriptor()
expectAgeAndGender(result)
expectFullFaceDescription(result)
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
.withFaceDescriptor()
expectFaceExpressions(result)
expectAgeAndGender(result)
expectFullFaceDescription(result)
})
})
describe('no memory leaks', () => {
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptor()
})
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
.withFaceDescriptor()
})
})
})
})
})
\ No newline at end of file
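The leak specs rely on expectAllTensorsReleased from test/utils. A minimal sketch of what such a check can look like (assumption: the real helper may differ in detail), comparing tf.memory().numTensors before and after the detection pipeline runs:

import * as tf from '@tensorflow/tfjs-core';

async function expectAllTensorsReleasedSketch(fn: () => Promise<void>) {
  const numTensorsBefore = tf.memory().numTensors
  await fn()
  // every intermediate tensor allocated by fn should have been disposed again
  expect(tf.memory().numTensors).toEqual(numTensorsBefore)
}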
......@@ -2,7 +2,7 @@ import * as faceapi from '../../../src';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
describeWithBackend('tinyFaceDetector.locateFaces', () => {
......
......@@ -4,7 +4,7 @@ import { TinyFaceDetectorOptions, createCanvasFromMedia } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
......@@ -88,7 +88,7 @@ describe('tinyFaceDetector - node', () => {
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
......
......@@ -4,25 +4,8 @@ import { TinyFaceDetectorOptions } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
function expectFaceExpressions(results: WithFaceExpressions<{}>[]) {
results.forEach((result, i) => {
const happy = result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction
const neutral = result.expressions.find(res => res.expression === 'neutral') as FaceExpressionPrediction
const happyProb = i === 4 ? 0 : 0.95
const neutralProb = i === 4 ? 0.4 : 0
expect(happy).not.toBeUndefined()
expect(neutral).not.toBeUndefined()
expect(happy.probability).toBeGreaterThanOrEqual(happyProb)
expect(neutral.probability).toBeGreaterThanOrEqual(neutralProb)
})
}
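The deleted helper above documents the old return shape: expressions used to be an array of { expression, probability } predictions that had to be searched with find, whereas the new globalApi specs read them as plain numeric properties. Roughly:

// before this PR: array of predictions
const happyBefore = result.expressions.find(res => res.expression === 'happy')
const probBefore = happyBefore ? happyBefore.probability : 0

// after this PR: object keyed by expression, as used in the new specs
const probAfter = result.expressions.happy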
describeWithBackend('tinyFaceDetector', () => {
......@@ -41,7 +24,7 @@ describeWithBackend('tinyFaceDetector', () => {
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
describeWithNets('globalApi', { withAllFacesTinyFaceDetector: true, withFaceExpressionNet: { quantized: true } }, () => {
describeWithNets('tinyFaceDetector', { withAllFacesTinyFaceDetector: true, withFaceExpressionNet: { quantized: true } }, () => {
describe('detectAllFaces', () => {
......@@ -69,34 +52,6 @@ describeWithBackend('tinyFaceDetector', () => {
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceExpressions()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
})
it('detectAllFaces.withFaceExpressions().withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
......@@ -111,22 +66,6 @@ describeWithBackend('tinyFaceDetector', () => {
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceExpressions().withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describe('detectSingleFace', () => {
......@@ -167,48 +106,6 @@ describeWithBackend('tinyFaceDetector', () => {
)
})
it('detectSingleFace.withFaceExpressions()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
expect(!!result).toBeTruthy()
expectFaceDetections(
result ? [result.detection] : [],
[expectedTinyFaceDetectorBoxes[2]],
[expectedScores[2]],
deltas.maxScoreDelta,
deltas.maxBoxDelta
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceExpressions().withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
expect(!!result).toBeTruthy()
expectFaceDetectionsWithLandmarks(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
......@@ -228,28 +125,6 @@ describeWithBackend('tinyFaceDetector', () => {
)
})
it('detectSingleFace.withFaceExpressions().withFaceLandmarks().withFaceDescriptor()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
.withFaceDescriptor()
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
})
describe('no memory leaks', () => {
......
......@@ -2,6 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { AgeGenderNet } from '../src/ageGenderNet/AgeGenderNet';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceExpressionNet } from '../src/faceExpressionNet/FaceExpressionNet';
......@@ -71,8 +72,12 @@ export function sortLandmarks(landmarks: FaceLandmarks[]) {
return sortByDistanceToOrigin(landmarks, l => l.positions[0])
}
export function sortByFaceDetection<T extends { detection: FaceDetection }>(descs: T[]) {
return sortByDistanceToOrigin(descs, d => d.detection.box)
export function sortByFaceBox<T extends { box: IRect }>(objs: T[]) {
return sortByDistanceToOrigin(objs, o => o.box)
}
export function sortByFaceDetection<T extends { detection: FaceDetection }>(objs: T[]) {
return sortByDistanceToOrigin(objs, d => d.detection.box)
}
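Both helpers delegate to sortByDistanceToOrigin, which gives the specs a deterministic face order. A plausible shape for it (assumption: the actual helper is defined earlier in test/utils.ts and may differ):

function sortByDistanceToOriginSketch<T>(objs: T[], getBox: (obj: T) => IRect): T[] {
  // order by the Euclidean distance of each box's top-left corner from (0, 0)
  const dist = ({ x, y }: IRect) => Math.sqrt(x * x + y * y)
  return objs.slice().sort((o1, o2) => dist(getBox(o1)) - dist(getBox(o2)))
}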
export type ExpectedFaceDetectionWithLandmarks = {
......@@ -114,6 +119,7 @@ export type InjectNetArgs = {
faceRecognitionNet: FaceRecognitionNet
mtcnn: Mtcnn
faceExpressionNet: FaceExpressionNet
ageGenderNet: AgeGenderNet
tinyYolov2: TinyYolov2
}
......@@ -129,6 +135,7 @@ export type DescribeWithNetsOptions = {
withFaceRecognitionNet?: WithNetOptions
withMtcnn?: WithNetOptions
withFaceExpressionNet?: WithNetOptions
withAgeGenderNet?: WithNetOptions
withTinyYolov2?: WithTinyYolov2Options
}
......@@ -176,6 +183,7 @@ export function describeWithNets(
faceRecognitionNet,
mtcnn,
faceExpressionNet,
ageGenderNet,
tinyYolov2
} = faceapi.nets
......@@ -192,6 +200,7 @@ export function describeWithNets(
withFaceRecognitionNet,
withMtcnn,
withFaceExpressionNet,
withAgeGenderNet,
withTinyYolov2
} = options
......@@ -244,6 +253,13 @@ export function describeWithNets(
)
}
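// mirrors the other nets: pass the uncompressed weights filename only when an
// unquantized model is requested; false presumably makes initNet fall back to
// the quantized weights manifest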
if (withAgeGenderNet) {
await initNet<AgeGenderNet>(
ageGenderNet,
!!withAgeGenderNet && !withAgeGenderNet.quantized && 'age_gender_model.weights'
)
}
if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<TinyYolov2>(
tinyYolov2,
......@@ -273,6 +289,7 @@ export function describeWithNets(
faceRecognitionNet,
mtcnn,
faceExpressionNet,
ageGenderNet,
tinyYolov2
})
})
......
[{"weights":[{"name":"entry_flow/conv_in/filters","shape":[3,3,3,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005431825039433498,"min":-0.7441600304023892}},{"name":"entry_flow/conv_in/bias","shape":[32],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/separable_conv0/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005691980614381678,"min":-0.6090419257388395}},{"name":"entry_flow/reduction_block_0/separable_conv0/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009089225881239947,"min":-1.1179747833925135}},{"name":"entry_flow/reduction_block_0/separable_conv0/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/separable_conv1/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00683894624897078,"min":-0.8138346036275228}},{"name":"entry_flow/reduction_block_0/separable_conv1/pointwise_filter","shape":[1,1,64,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011632566358528886,"min":-1.3028474321552352}},{"name":"entry_flow/reduction_block_0/separable_conv1/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_0/expansion_conv/filters","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010254812240600587,"min":-0.9229331016540528}},{"name":"entry_flow/reduction_block_0/expansion_conv/bias","shape":[64],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/separable_conv0/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0052509616403018725,"min":-0.6406173201168285}},{"name":"entry_flow/reduction_block_1/separable_conv0/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010788509424994973,"min":-1.4564487723743214}},{"name":"entry_flow/reduction_block_1/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.00553213918910307,"min":-0.7025816770160899}},{"name":"entry_flow/reduction_block_1/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013602388606351965,"min":-1.6186842441558837}},{"name":"entry_flow/reduction_block_1/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"entry_flow/reduction_block_1/expansion_conv/filters","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007571851038465313,"min":-1.158493208885193}},{"name":"entry_flow/reduction_block_1/expansion_conv/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005766328409606335,"min":-0.6688940955143349}},{"name":"middle_flow/main_block_0/separable_conv0/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.012136116214826995,"min":-1.5776951079275094}},{"name":"middle_flow/main_block_0/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004314773222979377,"min":-0.5652352922102984}},{"name":"middle_flow/main_block_0/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quanti
zation":{"dtype":"uint8","scale":0.01107162026798024,"min":-1.2400214700137868}},{"name":"middle_flow/main_block_0/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_0/separable_conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0036451735917259667,"min":-0.4848080876995536}},{"name":"middle_flow/main_block_0/separable_conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008791744942758598,"min":-1.134135097615859}},{"name":"middle_flow/main_block_0/separable_conv2/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004915751896652521,"min":-0.6095532351849126}},{"name":"middle_flow/main_block_1/separable_conv0/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010868691463096469,"min":-1.3368490499608656}},{"name":"middle_flow/main_block_1/separable_conv0/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv1/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005010117269029804,"min":-0.6012140722835765}},{"name":"middle_flow/main_block_1/separable_conv1/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010311148213405235,"min":-1.3816938605963016}},{"name":"middle_flow/main_block_1/separable_conv1/bias","shape":[128],"dtype":"float32"},{"name":"middle_flow/main_block_1/separable_conv2/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004911523706772748,"min":-0.7367285560159123}},{"name":"middle_flow/main_block_1/separable_conv2/pointwise_filter","shape":[1,1,128,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008976466047997568,"min":-1.2207993825276693}},{"name":"middle_flow/main_block_1/separable_conv2/bias","shape":[128],"dtype":"float32"},{"name":"exit_flow/reduction_block/separable_conv0/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005074804436926748,"min":-0.7104726211697447}},{"name":"exit_flow/reduction_block/separable_conv0/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011453078307357489,"min":-1.4545409450344011}},{"name":"exit_flow/reduction_block/separable_conv0/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/reduction_block/separable_conv1/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.007741751390344957,"min":-1.1380374543807086}},{"name":"exit_flow/reduction_block/separable_conv1/pointwise_filter","shape":[1,1,256,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.011347713189966538,"min":-1.497898141075583}},{"name":"exit_flow/reduction_block/separable_conv1/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/reduction_block/expansion_conv/filters","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006717281014311547,"min":-0.8329428457746318}},{"name":"exit_flow/reduction_block/expansion_conv/bias","shape":[256],"dtype":"float32"},{"name":"exit_flow/separable_conv/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0027201742518181892,"min":-0.3237007359663645}},{"name":"exit_flow/separable_conv/poi
ntwise_filter","shape":[1,1,256,512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.010076364348916447,"min":-1.330080094056971}},{"name":"exit_flow/separable_conv/bias","shape":[512],"dtype":"float32"},{"name":"fc/age/weights","shape":[512,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008674054987290326,"min":-1.2664120281443876}},{"name":"fc/age/bias","shape":[1],"dtype":"float32"},{"name":"fc/gender/weights","shape":[512,2],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0029948226377075793,"min":-0.34140978069866407}},{"name":"fc/gender/bias","shape":[2],"dtype":"float32"}],"paths":["age_gender_model-shard1"]}]
\ No newline at end of file
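The manifest above describes the quantized age and gender model: every weight tensor is stored as uint8 in age_gender_model-shard1, along with the scale and min needed to recover approximate float32 values (note the two heads: fc/age with a single regressed output, fc/gender with two class outputs). For illustration, the standard tfjs-style dequantization is a sketch like:

// w ≈ min + q * scale, applied element-wise to the stored uint8 buffer
function dequantize(quantized: Uint8Array, scale: number, min: number): Float32Array {
  const out = new Float32Array(quantized.length)
  for (let i = 0; i < quantized.length; i++) {
    out[i] = min + quantized[i] * scale
  }
  return out
}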