justadudewhohacks / face-api.js

JavaScript API for face detection and face recognition in the browser and nodejs with tensorflow.js
MIT License

Face recognition and emotion detection are not working on some devices #932

Open vahe-martirosyan-qp opened 10 months ago

vahe-martirosyan-qp commented 10 months ago
import React, { useCallback, useEffect, useRef, useState } from "react"
import Webcam from "react-webcam"
import * as faceapi from "face-api.js"
import "../../RespondentVideoWrapper.scss"

interface IVideoRecorder {
  recordedChunks: Blob[]
  setRecordedChunks: React.Dispatch<React.SetStateAction<Blob[]>>
  setUrl: React.Dispatch<React.SetStateAction<string>>
  capturing: boolean
  setCapturing: React.Dispatch<React.SetStateAction<boolean>>
  setUploadFile: React.Dispatch<React.SetStateAction<File | null>>
  setScreenshot: React.Dispatch<any>
  setDetectedEmotions: React.Dispatch<React.SetStateAction<string[]>>
}

const VideoRecorder: React.FC<IVideoRecorder> = ({
  recordedChunks,
  setRecordedChunks,
  setUrl,
  capturing,
  setCapturing,
  setUploadFile,
  setScreenshot,
  setDetectedEmotions,
}) => {
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const webcamRef = useRef<Webcam>(null)
  const stopRef = useRef<HTMLButtonElement>(null)
  const canvasRef = useRef<HTMLCanvasElement>(null)
  const [countdown, setCountdown] = useState<number | null>(null)
  const [btnNotVisible, setBtnNotVisible] = useState(false)
  const [recordingTime, setRecordingTime] = useState(0)
  const [isRecording, setIsRecording] = useState(false)
  const [emotionsArr, setEmotionsArr] = useState<string[]>([])
  const [screen, setScreen] = useState<File | null>(null)
  const [intervalId, setIntervalId] = useState<NodeJS.Timeout | null>(null)
  // keep the timer id in a ref so it survives re-renders and
  // clearInterval can always find it
  const timerIntervalRef = useRef<NodeJS.Timeout | null>(null)

  const formatTime = (seconds: number): string => {
    const minutes = Math.floor(seconds / 60)
    const remainingSeconds = seconds % 60
    const formattedMinutes = minutes < 10 ? `0${minutes}` : `${minutes}`
    const formattedSeconds = remainingSeconds < 10 ? `0${remainingSeconds}` : `${remainingSeconds}`
    return `${formattedMinutes}:${formattedSeconds}`
  }

  const startTimer = () => {
    setIsRecording(true)
    setRecordingTime(0)

    timerIntervalRef.current = setInterval(() => {
      setRecordingTime((prevTime) => prevTime + 1)
    }, 1000)
  }

  const stopTimer = () => {
    setIsRecording(false)
    setRecordingTime(0)
    if (timerIntervalRef.current) {
      clearInterval(timerIntervalRef.current)
      timerIntervalRef.current = null
    }
  }

  const startCountdown = useCallback(
    (count: number) => {
      setCountdown(count)
      setBtnNotVisible(true)

      const countdownInterval = setInterval(() => {
      setCountdown((prevCount) => {
          if (prevCount === 1) {
            clearInterval(countdownInterval)
            if (mediaRecorderRef.current) {
              setBtnNotVisible(false)
              mediaRecorderRef.current.start()
              startTimer()
              captureScreenshot()
            }
            return null
          }
          return prevCount === null ? null : prevCount - 1
        })
      }, 1000)
      return () => {
        clearInterval(countdownInterval)
      }
    },

    [mediaRecorderRef],
  )

  const handleDownload = useCallback(() => {
    if (recordedChunks.length) {
      const blob = new Blob(recordedChunks, {
        type: "video/webm",
      })

      const file = new File([blob], "recording.webm", {
        type: "video/webm",
      })

      const src = URL.createObjectURL(blob)
      stopTimer()
      setUrl(src)
      setUploadFile(file)
    } else {
      setTimeout(() => {
        if (stopRef.current) {
          stopRef.current.click()
        }
      }, 500)
    }
  }, [recordedChunks, stopRef])

  const handleStopCaptureClick = useCallback(async () => {
    if (mediaRecorderRef.current) {
      mediaRecorderRef.current.stop()
      setScreenshot(screen)
      handleDownload()
      setCapturing(false)
      if (intervalId !== null) {
        clearInterval(intervalId)
        setIntervalId(null)
      }

      const canvas = canvasRef.current
      if (canvas) {
        const context = canvas.getContext("2d")
        if (context) {
          context.clearRect(0, 0, canvas.width, canvas.height)
        }
      }
    }
  }, [handleDownload, recordedChunks, screen, intervalId])

  const handleDataAvailable = (data: BlobEvent) => {
    if (data.data.size > 0) {
      setRecordedChunks((prev) => prev.concat(data.data))
    }
  }

  const handleStartCaptureClick = useCallback(() => {
    if (webcamRef.current) {
      const video = webcamRef.current.video

      if (video) {
        const stream = video.srcObject as MediaStream

        if (stream) {
          setCapturing(true)

          // Safari's MediaRecorder does not support "video/webm"; fall back
          // to the browser default there so recording does not throw
          // NotSupportedError
          mediaRecorderRef.current = new MediaRecorder(
            stream,
            MediaRecorder.isTypeSupported("video/webm") ? { mimeType: "video/webm" } : undefined,
          )

          mediaRecorderRef.current.addEventListener("dataavailable", handleDataAvailable)
          startCountdown(3)
        } else {
          console.error("Video stream not available")
        }
      }
    }
  }, [handleDataAvailable])

  const captureScreenshot = () => {
    if (webcamRef.current) {
      const video = webcamRef.current.video
      const canvas = document.createElement("canvas")
      if (video) {
        // video.width is always a number, so `||` (not `??`) is needed
        // for the fallback to ever apply
        canvas.width = video.width || 823
        canvas.height = video.height || 365
        const context = canvas.getContext("2d")

        if (context) {
          context.drawImage(video, 0, 0, canvas.width, canvas.height)

          const screenshotDataUrl = canvas.toDataURL("image/png")
          const screenshotBlob = dataURLtoBlob(screenshotDataUrl)
          const screenshotFile = new File([screenshotBlob], "screenshot.png", {
            type: "image/png",
          })
          setScreen(screenshotFile)
        }
      }
    }
  }

  const dataURLtoBlob = (dataURL: string) => {
    const arr = dataURL.split(",")
    const mime = arr[0].match(/:(.*?);/)![1]
    const bstr = atob(arr[1])
    let n = bstr.length
    const u8arr = new Uint8Array(n)

    while (n--) {
      u8arr[n] = bstr.charCodeAt(n)
    }

    return new Blob([u8arr], { type: mime })
  }

  useEffect(() => {
    const loadModels = async () => {
      await Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri("/models"),
        faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
        faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
        faceapi.nets.faceExpressionNet.loadFromUri("/models"),
        faceapi.nets.ageGenderNet.loadFromUri("/models"),
      ])

      if (webcamRef.current) {
        const video = webcamRef.current.video

        if (video) {
          // if the webcam metadata has already loaded by the time the models
          // finish downloading, "loadedmetadata" will never fire, so the
          // handler also has to run immediately in that case (see below)
          const startDetection = () => {
            const displaySize = { width: video.width || 823, height: video.height || 365 }
            faceapi.matchDimensions(canvasRef.current!, displaySize)

            setIntervalId(
              setInterval(async () => {
                const detections = await faceapi
                  .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
                  .withFaceLandmarks()
                  .withFaceDescriptors()
                  .withFaceExpressions()
                  .withAgeAndGender()
                // resizeResults returns a new array scaled from the video's
                // intrinsic size to the display size; the boxes must be drawn
                // from this resized copy for the overlay to line up
                const resizedDetections = faceapi.resizeResults(detections, displaySize)
                const emotions: string[] = detections.map((detection) => {
                  const expression = detection.expressions as faceapi.FaceExpressions
                  const emotionKeys = Object.keys(expression) as (keyof faceapi.FaceExpressions)[]
                  return emotionKeys.reduce((a, b) => (expression[a] > expression[b] ? a : b))
                })

                setEmotionsArr(emotions)

                const canvas = canvasRef.current!
                const context = canvas.getContext("2d")!
                context.clearRect(0, 0, canvas.width, canvas.height)

                // Draw a square around each detected face, using the resized
                // coordinates so the overlay aligns on any device resolution
                resizedDetections.forEach((detection) => {
                  const { box } = detection.detection

                  context.beginPath()
                  context.lineWidth = 2
                  context.strokeStyle = "red"

                  context.rect(box.x, box.y, box.width, box.height)
                  context.stroke()
                })
              }, 100),
            )
          }
          if (video.readyState >= 1) {
            startDetection()
          } else {
            video.addEventListener("loadedmetadata", startDetection)
          }
        } else {
          console.error("Video element not available")
        }
      }
    }

    webcamRef.current && loadModels()

    return () => {
      if (intervalId) {
        clearInterval(intervalId)
      }
    }
  }, [webcamRef])

  useEffect(() => {
    if (isRecording) {
      setDetectedEmotions((prevState) => [...prevState, ...emotionsArr])
    }
  }, [emotionsArr, isRecording])

  return (
    <div>
      <div className={"recorder-wrapper"}>
        <>
          <Webcam
            className={"recorder-wrapper-webcam"}
            audio={true}
            muted={true}
            ref={webcamRef}
            onUserMediaError={(e) => {
              console.error("Error accessing webcam:", e)
            }}
            height={365}
            width={823}
            style={{ objectFit: "contain" }}
          />
          <canvas ref={canvasRef} style={{ position: "absolute", top: 0, left: 0 }} />
          {countdown !== null && (
            <div className='recorder-wrapper-countdown'>
              <p>{countdown}</p>
            </div>
          )}
          {isRecording && (
            <div className='recorder-wrapper-rec'>
              <p>REC: {formatTime(recordingTime)}</p>
            </div>
          )}
          <button
            ref={stopRef}
            onClick={handleStopCaptureClick}
            className={`recorder-wrapper-btn ${capturing ? "recorder-wrapper-btn-capturing" : ""} ${
              btnNotVisible ? "recorder-wrapper-btn-hidden" : ""
            }`}
          >
            <span></span>
          </button>
          <button
            onClick={handleStartCaptureClick}
            className={`recorder-wrapper-btn  ${!capturing ? "recorder-wrapper-btn-stop" : ""} ${
              btnNotVisible ? "recorder-wrapper-btn-hidden" : ""
            }`}
          >
            <span></span>
          </button>
        </>
      </div>
      {!capturing && (
        <div className='recorder-wrapper-info d-flex-column-centered'>
          <p>Ensure your head and shoulders are in shot. Hit record when you’re ready.</p>
        </div>
      )}
    </div>
  )
}

export default VideoRecorder

On my device, face recognition and emotion detection work well, but on some devices they don't work at all.
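
A quick way to narrow this down is to log what the failing devices actually support, e.g. (a minimal sketch; it assumes the faceapi.tf re-export that face-api.js provides, and that you have some way to surface logs from those devices):

// "cpu" here instead of "webgl" usually means WebGL is unavailable or
// blacklisted on that device, which makes detection far too slow to be usable
console.log("tfjs backend:", faceapi.tf.getBackend())

// Safari's MediaRecorder has no webm support, so a hard-coded
// "video/webm" mimeType would throw there
console.log(
  "video/webm supported:",
  typeof MediaRecorder !== "undefined" && MediaRecorder.isTypeSupported("video/webm"),
)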

manh21 commented 1 month ago

Your device needs a dedicated GPU; it definitely won't keep up on an Intel CPU.
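
If the integrated-GPU/WebGL path is the suspect, one way to test is to force the CPU backend before loading the models; much slower, but it rules WebGL in or out. A sketch, again assuming the faceapi.tf re-export:

// run once, before any loadFromUri() calls
await faceapi.tf.setBackend("cpu")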

Try using the tiny models instead:

tinyFaceDetector
faceLandmark68TinyNet

see #113
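
For example, a minimal sketch of the tiny-model setup (same /models folder assumed; dropping withFaceDescriptors and withAgeAndGender from the pipeline also cuts the per-frame cost considerably if you only need the expressions):

await Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri("/models"),
  faceapi.nets.faceLandmark68TinyNet.loadFromUri("/models"),
  faceapi.nets.faceExpressionNet.loadFromUri("/models"),
])

const detections = await faceapi
  .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions({ inputSize: 224 }))
  .withFaceLandmarks(true) // true = use the tiny landmark net
  .withFaceExpressions()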