justadudewhohacks / face-api.js

JavaScript API for face detection and face recognition in the browser and nodejs with tensorflow.js
MIT License
16.56k stars 3.69k forks source link

Face Extraction ? #510

Open rachitgupta98 opened 4 years ago

rachitgupta98 commented 4 years ago

Having images saved in a folder, we can easily detect faces and do whatever we want with them, but I want to know how to extract the faces and save them to an image folder or a database, so that when we encounter the same face again it will be recognized.

mohamadHarith commented 4 years ago

We could extract the face descriptor of the reference image(s) and save it into the database. When we have a query image, we extract the query image's face descriptor and compare against the reference image descriptor stored in the database.

Here's an example:

faceRecognitionService.js

const faceapi = require('face-api.js');
const path = require('path');
const fs = require('fs');
const {createCanvas, Image, Canvas} = require('canvas');
faceapi.env.monkeyPatch({Canvas, Image})

class faceRecognitionService {

    // NOTE(review): returning a Promise from a constructor is an anti-pattern
    // (a static async factory would be preferable), but existing callers rely
    // on `await new faceRecognitionService()`, so the shape is preserved.
    constructor() {
        return (async () => {
            await this.loadModels();
            return this;
        })();
    }

    // Loads the detector, landmark, and recognition models from the
    // `../models` directory relative to this file. Must complete before
    // any detection call.
    async loadModels() {
        const modelURL = path.join(__dirname, '../', './models');
        await faceapi.nets.tinyFaceDetector.loadFromDisk(modelURL);
        await faceapi.nets.faceLandmark68TinyNet.loadFromDisk(modelURL);
        await faceapi.nets.faceRecognitionNet.loadFromDisk(modelURL);
    }

    /**
     * Computes the face descriptor for the single best face found in the
     * image at `imagePath`.
     *
     * @param {string} imagePath - path to an image file on disk
     * @returns {Promise<Float32Array>} the face descriptor
     * @throws {Error} if the image cannot be loaded or no face passes the
     *   detector's score threshold
     */
    async getFaceDescriptor(imagePath) {
        // node-canvas decodes a Buffer assigned to `src` synchronously, so
        // the image dimensions are available right after the assignment.
        // (`readFileSync` is synchronous — the original `await` was a no-op.)
        const img = new Image();
        img.onerror = (err) => { throw err; };
        img.src = fs.readFileSync(imagePath);

        // Size the canvas to the image instead of a fixed 900x900 so larger
        // images are not cropped and smaller ones are not padded with blanks.
        const canvas = createCanvas(img.width, img.height);
        canvas.getContext('2d').drawImage(img, 0, 0);

        // Face detection with the tiny detector + tiny landmark model.
        const option = new faceapi.TinyFaceDetectorOptions({
            inputSize: 512,
            scoreThreshold: 0.6
        });
        const useTinyModel = true;
        const detection = await faceapi
            .detectSingleFace(canvas, option)
            .withFaceLandmarks(useTinyModel)
            .withFaceDescriptor();

        // detectSingleFace resolves to undefined when nothing passes the
        // threshold; fail loudly instead of throwing a cryptic TypeError on
        // `undefined.descriptor` below.
        if (!detection) {
            throw new Error(`No face detected in ${imagePath}`);
        }
        return detection.descriptor;
    }

    /**
     * Returns true when the query descriptor matches the labeled reference
     * descriptors within a Euclidean distance of 0.5.
     *
     * @param {Float32Array} queryImageFaceDescriptor - descriptor of the query face
     * @param {Float32Array[]} referenceImageFaceDescriptors - descriptors of the reference face(s)
     * @param {string} referenceImageFaceName - label for the reference person
     * @returns {Promise<boolean>} true if query and reference faces match
     */
    async isFaceMatch(queryImageFaceDescriptor, referenceImageFaceDescriptors, referenceImageFaceName) {
        // Constructors are synchronous; the original `await new ...` calls
        // were no-ops and have been dropped.
        const labeledDescriptors = new faceapi.LabeledFaceDescriptors(referenceImageFaceName, referenceImageFaceDescriptors);
        const faceMatcher = new faceapi.FaceMatcher(labeledDescriptors, 0.5);
        const result = faceMatcher.findBestMatch(queryImageFaceDescriptor);
        // Use the public `label`/`distance` accessors rather than reaching
        // into the private `_label`/`_distance` fields.
        return result.label === referenceImageFaceName && result.distance < 0.5;
    }
}

// Exported wrapped in an object so consumers destructure it:
// const {faceRecognitionService} = require('./faceRecognitionService');
module.exports = {faceRecognitionService}

test.js

const {faceRecognitionService} = require('./faceRecognitionService');
const path = require('path');

// Multiple reference images of the same person improve recognition accuracy.
const referenceImagePath1 = path.join(__dirname, '../', './testImage.jpg');
const referenceImagePath2 = path.join(__dirname, '../', './testImage2.jpg');
const queryImagePath = path.join(__dirname, '../', './testImage4.jpg');

const test = async () => {
    // The constructor returns a Promise that resolves once the models are
    // loaded. Renamed the local from `test` to `service` — the original
    // shadowed the enclosing `test` function.
    const service = await new faceRecognitionService();

    // These descriptors could be saved to a database and retrieved on demand.
    // Array.prototype.push is synchronous — only the descriptor computation
    // needs awaiting, so the original `await ...push(...)` was a no-op.
    const refImageDescArray = [
        await service.getFaceDescriptor(referenceImagePath1),
        await service.getFaceDescriptor(referenceImagePath2),
    ];

    const queryImageDesc = await service.getFaceDescriptor(queryImagePath);

    const result = await service.isFaceMatch(queryImageDesc, refImageDescArray, 'John Doe');
    console.log(result); // true if the query face matches the reference face
};

// Surface rejections instead of leaving the top-level promise floating.
test().catch((err) => {
    console.error(err);
    process.exitCode = 1;
});

Hope this helps.

whyboris commented 4 years ago

You can use the sharp module (npm install sharp) and once you have the detections you can do something like this:

const sharp = require('sharp');
...
const detections = await faceapi.detectAllFaces(img);
crop(detections);

/**
 * Crops each detected face out of the source image and writes it to
 * `out_<index>.jpg`.
 *
 * @param {Array} matches - face-api.js detections, each carrying a `_box`
 *   with `_x`, `_y`, `_width`, `_height` in pixels
 * @returns {Promise<Array>} resolves when every crop has been written.
 *   The original dropped the per-file promises inside `forEach`, so write
 *   failures were silently swallowed; returning `Promise.all` lets callers
 *   await completion and see errors (callers ignoring the return value are
 *   unaffected).
 */
function crop(matches) {
  console.log('found', matches.length, 'faces');

  return Promise.all(matches.map((match, index) =>
    sharp(imgPath) // NOTE(review): `imgPath` is a free variable — confirm it is in scope at the call site
      .extract({
        left: Math.round(match._box._x),
        top: Math.round(match._box._y),
        width: Math.round(match._box._width),
        height: Math.round(match._box._height)
      })
      .toFile(`out_${index}.jpg`)
  ));
}