justadudewhohacks / face-api.js

JavaScript API for face detection and face recognition in the browser and nodejs with tensorflow.js
MIT License
16.51k stars 3.68k forks source link

Euclidean distance issues when doing face recognition #434

Closed thirukumars closed 4 years ago

thirukumars commented 4 years ago

Uncaught (in promise) TypeError: Cannot read property 'length' of undefined at euclideanDistance (euclideanDistance.js:2) at FaceMatcher.js:60 at Array.map () at FaceMatcher.computeMeanDistance (FaceMatcher.js:59) at FaceMatcher.js:72 at Array.map () at FaceMatcher.matchDescriptor (FaceMatcher.js:69) at FaceMatcher.findBestMatch (FaceMatcher.js:79) at camera.jsx:78 at Array.map () at camera.jsx:78

thirukumars commented 4 years ago

This is my code

import React, { Component } from 'react';
import * as faceapi from 'face-api.js'
const MODEL_URL='/models'

// var labeledFaceDescriptors=0;
// let count=0;

class camera extends Component{

  constructor(props) {
    super(props);
    this.videoTag = React.createRef()

    this.state={
       detection:null,
       video:null
    }
    this.detect=this.detect.bind(this);
  }

  componentDidMount() {
    // getting access to webcam
    navigator.mediaDevices
    .getUserMedia({video: true})
    .then(stream =>
    this.videoTag.current.srcObject = stream,
    this.loadModels(),
    this.detect()
     )
    .catch(console.log);
  }
  loadModels(){
     faceapi.loadFaceDetectionModel(MODEL_URL);
     faceapi.loadSsdMobilenetv1Model(MODEL_URL);
   faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
   faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
     faceapi.loadFaceLandmarkModel(MODEL_URL);
     faceapi.loadFaceRecognitionModel(MODEL_URL)
  }

  detect=async ()=>{
    const videoTag=document.getElementById('videoTag');
    const canvas=document.getElementById('myCanvas');

    console.log("geeting");
    const displaySize = { width: videoTag.width, height: videoTag.height };
    faceapi.matchDimensions(canvas, displaySize);

    setInterval(async () => {
    let fullFaceDescriptions = await faceapi.detectAllFaces(videoTag).withFaceLandmarks();
    const value=fullFaceDescriptions.length;
    this.setState({detection:value});
    console.log("hiii", this.state.detection);
     fullFaceDescriptions = faceapi.resizeResults(fullFaceDescriptions, displaySize);

     const labels = ['praveen',"p"]
     const labeledFaceDescriptors = await Promise.all(
     labels.map(async label => {
     // fetch image data from urls and convert blob to HTMLImage element
     const imgUrl = `/img/${label}.png`
     const img = await faceapi.fetchImage(imgUrl)

     // detect the face with the highest score in the image and compute it's landmarks and face descriptor
     const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
     // console.log(fullFaceDescription.expressions.asSortedArray().toString().expression);
     if (!fullFaceDescription) { 
       throw new Error(`no faces detected for ${label}`)
     }

     const faceDescriptors = [fullFaceDescription.descriptor]
     return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
   })
 )  
     const maxDescriptorDistance = 0.5
     const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
     const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))
     results.forEach((bestMatch, i) => {

     const box = fullFaceDescriptions[i].detection.box
     const text = bestMatch.toString()  //this for basMatch name detection

     const drawBox = new faceapi.draw.DrawBox(box, { label: text })
   // canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
   // faceapi.draw.drawDetections(canvas, fullFaceDescriptions)
   //FaceExpression=fullFaceDescription  *(face-api.js)*
   //this function calcultae the average expression *from face-api.js 

   drawBox.draw(canvas);
     })

   canvas.getContext('2d').clearRect(0,` 0, canvas.width, canvas.height);
    faceapi.draw.drawDetections(canvas, fullFaceDescriptions);
    faceapi.draw.drawFaceLandmarks(canvas, fullFaceDescriptions)

    }, 200);

  }

  render() {
    return( 
      <div>
        <div>
          <video id="videoTag" style={{position:"absolute", top:0}}
                 ref={this.videoTag}
                 width={500}
                 height={500}
                 autoPlay
          ></video>
        </div>

        <div>
          <canvas id="myCanvas" style={{position:"absolute", top:0}}
                                          height={500}
                                          width={500}>
          </canvas>
         </div>
         <h1 >{this.state.detection}</h1>
      </div>                      
    );
  }
}
export default camera;
thirukumars commented 4 years ago

I found the answer: the `fullFaceDescriptions` detection chain should include `.withFaceDescriptor()`.

abdulkadhir commented 4 years ago

@thirukumars I tried the above code but I got "'draw' is not exported from 'face-api.js' (imported as 'faceapi')". Can you explain how to achieve this in React?

benndip commented 3 years ago

@thirukumars since you are using faceapi.detectAllFaces , you should add .withFaceDescriptors() and not .withFaceDescriptor(). There should be an s there since it is not for a single face.

benndip commented 3 years ago

@thirukumars I try the above code but I got 'draw' is not exported from 'face-api.js' (imported as 'faceapi'). can you explain me how to achieve this in react js

@abdulkadhir do this in react it will work import * as faceapi from 'face-api.js'

then use it where ever like this

faceapi.draw.drawDetections(canvasRef.current, resizedDetections) faceapi.draw.drawFaceLandmarks(canvasRef.current, resizedDetections) faceapi.draw.drawFaceExpressions(canvasRef.current, resizedDetections)