ageitgey / face_recognition

The world's simplest facial recognition api for Python and the command line
MIT License

Getting Error to display result in real time face recognition #780

Open adamgan919191 opened 5 years ago

adamgan919191 commented 5 years ago

Description

I am trying to build real-time face recognition using Flask, Python, and the face_recognition library. I have three files: app.py, face.py, and camera.py. For your information, when I print face_locations and face_encodings, the results appear in my command prompt whenever the webcam detects a face, and the webcam stops automatically when no face is present.

My question is: why do I get an error like the one below when I run app.py and open my webcam? Could you help me?

File "C:\tutorial\face_recognition\venv\src\camera.py", line 110, in get_frame cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1) TypeError: bad argument type for built-in operation

from flask import Flask, Response, json, render_template
from werkzeug.utils import secure_filename
from flask import request
from os import path, getcwd
import time
from face import Face
import cv2
from db import Database
import face_recognition
app = Flask(__name__)

app.config['file_allowed'] = ['image/png', 'image/jpeg']
app.config['train_img'] = path.join(getcwd(), 'train_img')
app.db = Database()
aface = Face(app) #You would need an app here
aface.load_all()
known_encoding_faces = aface.known_encoding_faces
user_id = aface.face_user_keys

class VideoCamera:
    def __init__(self,app):
        self.known_encoding_faces = aface.known_encoding_faces
        self.user_id = aface.face_user_keys
        #print face.known_encoding_faces
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.faces = []
        self.video_capture = cv2.VideoCapture(0)
        self.face_user_keys = {}
        #self.recognize()
        self.name_face()
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py
    def load_user_by_index_key(self, index_key=0):

        key_str = str(index_key)

        if key_str in self.face_user_keys:
            return self.face_user_keys[key_str]

        return None

    def name_face(self):
        results = app.db.select('SELECT users.name,faces.id, faces.user_id, faces.filename, faces.created FROM faces INNER JOIN users on users.id = faces.user_id')
        for row in results:
            user = {
                "name": row[0]
            }
            face = {
                "id": row[1],
                "user_id": row[2],
                "filename": row[3],
                "created": row[4]
            }
            self.faces.append(user)

    def get_frame(self):
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        success, frame = self.video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            #print(face_locations)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)[0]
            #print(face_encodings)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_encoding_faces, face_encodings)
                name = "Unknown"

                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.faces[first_match_index]

                face_names.append(name)
                #print(face_names)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            #Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def __del__(self):
        self.video_capture.release()
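
Two details in get_frame above are probably where the bad argument ends up in cv2.putText: face_recognition.face_encodings(...)[0] picks out a single 128-number encoding, so the for loop iterates over individual floats rather than over encodings, and name = self.faces[first_match_index] assigns a whole {"name": ...} dict rather than the name string. A sketch of how that part of get_frame could look, assuming the rest of the class stays as posted and that known_encoding_faces and self.faces were built in the same order:

# Inside get_frame, replacing the matching section (sketch only):
face_locations = face_recognition.face_locations(rgb_small_frame)
# Keep the whole list of encodings; do not index with [0] here.
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

face_names = []
for face_encoding in face_encodings:
    # Compare this one encoding against all known encodings.
    matches = face_recognition.compare_faces(self.known_encoding_faces, face_encoding)
    name = "Unknown"

    if True in matches:
        first_match_index = matches.index(True)
        # self.faces holds dicts like {"name": ...}; take the string out
        # so cv2.putText receives a str, not a dict.
        name = self.faces[first_match_index]["name"]

    face_names.append(name)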
Janluke0 commented 5 years ago

Are left and bottom integers?
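
A quick way to check is a temporary print just above the cv2.putText call; the first two should be int and the last should be str:

# Temporary debug print right before cv2.putText (sketch only):
print(type(left), type(bottom), type(name), repr(name))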

adamgan919191 commented 5 years ago

Yup. Can you help me?