Face Detector With NodeJS! #1

Hilton W Silva
Published in HiltonWS.com
5 min read · Jul 17, 2021

Prerequisites

Before starting, please read the following article: https://blog.hiltonws.com/first-api-with-node-js-and-express-da7ade41ca39

What will you see here?

  1. Get image files from a sample folder
  2. How to detect faces?
  3. How to extract faces from an image?

Get image files from a sample folder

We will create a folder called images/samples; inside this folder we can put a couple of images with faces.

Here I got some images from my Instagram to test.

Import the libraries that we will use in this project:

const faceapi = require('@vladmandic/face-api');
const tf = require('@tensorflow/tfjs-node')
const fs = require('fs')
const jpeg = require('jpeg-js');
const sharp = require('sharp');
  • faceapi: Face detection/recognition API (face-api.js)
  • tf: TensorFlow helper, used to create the image tensors
  • fs: File system API
  • jpeg: Used to decode JPEG images
  • sharp: Image operations
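If you don't have these packages installed yet, you can add them to your project with npm (assuming you already initialized the project as in the prerequisite article):

npm install @vladmandic/face-api @tensorflow/tfjs-node jpeg-js sharp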

Let’s create a function to process the images:

async function process() {
  // Load models from disk (in this case we only use the Tiny Face Detector model)
  await faceDetectionNet.loadFromDisk('./models');
}
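The code above uses faceDetectionNet, which (together with the faceDetectionOptions used later) is defined near the top of the file, as you can see in the full listing at the end of this article:

const minConfidence = 0.5;
// Use the Tiny Face Detector algorithm with a minimum confidence of 50%
const faceDetectionNet = faceapi.nets.tinyFaceDetector;
const faceDetectionOptions = new faceapi.TinyFaceDetectorOptions({ minConfidence });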

Into the folder ./models, put the models you can get from here: https://github.com/HiltonWS/profileIdentifier/tree/face-detector/models

Next, we will search the samples folder for the images:

async function process() {
  // Load models from disk (in this case we only use the Tiny Face Detector model)
  await faceDetectionNet.loadFromDisk('./models');
  // Samples folder
  let samples = './images/samples/'
  // Read the samples folder
  fs.readdir(samples, async (_err, files) => {
    // For each file, detect the faces
    files.forEach(async (file, index) => {
      let imgPath = samples + file
      fs.readFile(imgPath, async (_err, img) => {
        await detectFaces(img, imgPath, index)
      });
    });
  });
}

Here we read the folder and, for each file, build an imgPath that is used to read the file; the image buffer, the image path and the file index are then passed to detectFaces (created in the next step).

How to detect faces?

We will use face-api.js to detect faces, so we need to create the previously used function called detectFaces:

async function detectFaces(img, imgPath, index) {
  // Decode the image buffer as JPEG
  let imgJpeg = jpeg.decode(img, true)
  // Create a TensorFlow tensor from the pixels
  let tFrame = tf.browser.fromPixels(imgJpeg)
  // Detect all faces
  let faces = await faceapi.detectAllFaces(tFrame, faceDetectionOptions)
  if (faces.length > 0) {
    // For each face, cut it out and save it
    faces.forEach(async (face) => {
      let box = face.box
      saveFace(imgPath, box, index)
    })
    return;
  }
}

With the img buffer, we need to decode it as JPEG so it can be turned into a TensorFlow tensor (tf):

// Decode the image buffer as JPEG
let imgJpeg = jpeg.decode(img, true)
// Create a TensorFlow tensor from the pixels
let tFrame = tf.browser.fromPixels(imgJpeg)

Now we can use faceapi.detectAllFaces:

let faces = await faceapi.detectAllFaces(tFrame, faceDetectionOptions)

It will return an array of detected faces:

if (faces.length > 0) {
  // For each face, cut it out and save it
  faces.forEach(async (face) => {
    let box = face.box
    saveFace(imgPath, box, index)
  })
}

Each face has a box, which is the region where the face was detected in the image.
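If you want to see what the detector found, a small optional sketch (assuming the same faces array inside detectFaces) is to log the box of each detection:

// Log the bounding box of each detected face (pixel coordinates)
faces.forEach((face) => {
  let box = face.box
  console.log('face at', box.left, box.top, 'size', box.width, 'x', box.height)
})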

In the next step we will create the saveFace function, to better understand why we need the box to extract the faces.

How to extract faces from an image?

We will create a function called saveFace; it works as an extractor/resizer for the received image, cropping the face from the region (box):

async function saveFace(path, box, suffix) {
  // Let's define the params of the face region
  let left = Math.round(Math.abs(box.left))
  let top = Math.round(box.top)
  let width = Math.round(box.width)
  let height = Math.round(box.height)
  let imgPath = './images/faces/face' + suffix + '.jpg';
  let size = 150;
  let region = {
    left: left,
    top: top,
    width: width,
    height: height
  }
  // If all params are ok and the face area is valid, try to extract it
  try {
    await sharp(path).extract(region).resize(size, size).greyscale().toFile(imgPath)
  } catch {
    // When extract fails, just resize instead
    await sharp(path).resize(region).resize(size, size).greyscale().toFile(imgPath)
  }
}

We get the information from the face box:

// Let's define the params of the face region
let left = Math.round(Math.abs(box.left))
let top = Math.round(box.top)
let width = Math.round(box.width)
let height = Math.round(box.height)

Now we can create a region; it is passed to sharp so it knows the area that will be cropped.

let region = {
  left: left,
  top: top,
  width: width,
  height: height
}

Here we try to extract the region; if the detected face is wrong or has negative values, we fall back to a resize:

// If all params are ok and the face area is valid, try to extract it
try {
  await sharp(path).extract(region).resize(size, size).greyscale().toFile(imgPath)
} catch {
  // When extract fails, just resize instead
  await sharp(path).resize(region).resize(size, size).greyscale().toFile(imgPath)
}

All images will be saved at 150px x 150px (size) and in greyscale.

Before running the code we need to create the path images/faces; this is where the faces will be saved.
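If you prefer, both folders can also be created from the code itself; a minimal sketch using Node's built-in fs module (the same fs already required above):

// Create the samples and faces folders if they don't exist yet
fs.mkdirSync('./images/samples', { recursive: true })
fs.mkdirSync('./images/faces', { recursive: true })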

The complete code looks like this:

const faceapi = require('@vladmandic/face-api');
const tf = require('@tensorflow/tfjs-node')
const fs = require('fs')
const jpeg = require('jpeg-js');
const sharp = require('sharp');
const minConfidence = 0.5;
// Use the Tiny Face Detector algorithm with a minimum confidence of 50%
const faceDetectionNet = faceapi.nets.tinyFaceDetector;
const faceDetectionOptions = new faceapi.TinyFaceDetectorOptions({ minConfidence });

async function detectFaces(img, imgPath, index) {
  // Decode the image buffer as JPEG
  let imgJpeg = jpeg.decode(img, true)
  // Create a TensorFlow tensor from the pixels
  let tFrame = tf.browser.fromPixels(imgJpeg)
  // Detect all faces
  let faces = await faceapi.detectAllFaces(tFrame, faceDetectionOptions)
  if (faces.length > 0) {
    // For each face, cut it out and save it
    faces.forEach(async (face) => {
      let box = face.box
      saveFace(imgPath, box, index)
    })
    return;
  }
}

async function saveFace(path, box, suffix) {
  // Let's define the params of the face region
  let left = Math.round(Math.abs(box.left))
  let top = Math.round(box.top)
  let width = Math.round(box.width)
  let height = Math.round(box.height)
  let imgPath = './images/faces/face' + suffix + '.jpg';
  let size = 150;
  let region = {
    left: left,
    top: top,
    width: width,
    height: height
  }
  // If all params are ok and the face area is valid, try to extract it
  try {
    await sharp(path).extract(region).resize(size, size).greyscale().toFile(imgPath)
  } catch {
    // When extract fails, just resize instead
    await sharp(path).resize(region).resize(size, size).greyscale().toFile(imgPath)
  }
}

async function process() {
  // Load models from disk (in this case we only use the Tiny Face Detector model)
  await faceDetectionNet.loadFromDisk('./models');
  // Samples folder
  let samples = './images/samples/'
  // Read the samples folder
  fs.readdir(samples, async (_err, files) => {
    // For each file, detect the faces
    files.forEach(async (file, index) => {
      let imgPath = samples + file
      fs.readFile(imgPath, async (_err, img) => {
        await detectFaces(img, imgPath, index)
      });
    });
  });
}

// Call process after 100ms
setTimeout(process, 100);

And we can run the code sample:

node face-detector.js

Let's look at the images/faces folder.

As we can see, those are all the detected faces.

In the next steps we will learn how to do Face Recognition With Labels.

See you then!

Please clone the final project from here https://github.com/HiltonWS/profileIdentifier/blob/face-detector/face-detector.js

Hilton W Silva
HiltonWS.com

Hi! I’m Hilton W. Silva, a software developer who is passionate about open source and technologies that can help people. Please check out hiltonws.com