Closed — Risingabhi closed this issue 2 years ago.
This project does not guarantee 100% accuracy, so this is not an issue.
Thanks for the update. It may not be an issue, but is it normal to come across such cases? In our case we are observing many false positives repeatedly. Does that mean deepface only works on certain kinds of images and is not robust enough to handle real-life cases?
Yes, this is normal.
Hi @serengil,
Kindly share your thoughts on why I am getting false positives on images repeatedly. I have observed that setting the threshold to less than 0.40 gives a lot of matches. I extracted representations and calculated the cosine score between the two images below (one of a girl and one of a boy), but it gives me a poor result:
`(0.22019625290381495, 'C:/Users/Risin/Desktop/id_photo/omkar.jpg')`
How can this be explained? Do you think it is possible to get such results? Is there anything I might have missed? My code is below:

```python
import os
import math
import time
import pickle
import pathlib
import multiprocessing as multi
from pathlib import Path
from threading import Thread
from queue import Queue
from datetime import datetime, timedelta

import cv2
import numpy as np
from numpy import asarray
from scipy import spatial
from mtcnn import MTCNN

from deepface import DeepFace
from deepface.DeepFace import build_model, represent
from deepface.commons import functions, distance, realtime
from deepface.commons.distance import findCosineDistance
from deepface.basemodels import VGGFace, OpenFace, Facenet, Facenet512, FbDeepFace, DeepID
from deepface.basemodels import DlibWrapper as DlibModelWrapper
from deepface.detectors import FaceDetector
from deepface.detectors import OpenCvWrapper, SsdWrapper, MtcnnWrapper, RetinaFaceWrapper, MediapipeWrapper
from deepface.detectors import DlibWrapper as DlibDetectorWrapper

from folder_alex import *     # apicall() imported for getting link to save images after attendance
from search_folders import *
# available detector backends and recognition models
backends = ['opencv', 'ssd', 'dlib', 'mtcnn']
models = ["VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib"]

# loader tables mirroring DeepFace internals; renamed so they do not shadow the
# lists above (DlibWrapper exists in both deepface.basemodels and
# deepface.detectors, hence the import aliases)
model_loaders = {
    'VGG-Face': VGGFace.loadModel,
    'OpenFace': OpenFace.loadModel,
    'Facenet': Facenet.loadModel,
    'Facenet512': Facenet512.loadModel,
    'DeepFace': FbDeepFace.loadModel,
    'DeepID': DeepID.loadModel,
    'Dlib': DlibModelWrapper.loadModel,
}
backend_builders = {
    'opencv': OpenCvWrapper.build_model,
    'ssd': SsdWrapper.build_model,
    'dlib': DlibDetectorWrapper.build_model,
}

# any of the neural network models above can be selected here
model = build_model('VGG-Face')
# read the face image extracted with the MTCNN detector
# img = cv2.imread('C:/Users/Risin/Desktop/id_photo/yashila_1.jpeg')   # alternative probe image
img = cv2.imread("C:/Users/Risin/Desktop/FR_ats/Students/Student/1011/1017/Achyatanandmadhukar_102_1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# get its representation; first compute a blur score (variance of the Laplacian)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.Laplacian(gray, cv2.CV_64F).var()
print("blur_score", blur)
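# Hypothetical follow-up (the 100 cut-off is an assumption; tune it per camera):
# very blurry probes are a common source of false positives, so they could be
# rejected here before any embedding is computed.
if blur < 100:
    print("warning: very blurry image; embedding quality may suffer")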
# mild detail enhancement before embedding
img = cv2.detailEnhance(img, sigma_s=10, sigma_r=0.15)
cv2.imshow("test_image", img)
cv2.waitKey(0)

embeddings1 = represent(img, model_name='VGG-Face', model=model, enforce_detection=False,
                        detector_backend='mtcnn', align=True, normalization='VGG-Face')
# read the 2nd image from the database with which it shows similarity
# in the real run with face_detector.py
detector = MTCNN()
img2 = cv2.imread('C:/Users/Risin/Desktop/id_photo/omkar.jpg')
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# NOTE: normalization='base' here while embeddings1 used normalization='VGG-Face';
# comparing embeddings produced with different normalizations skews the distance
embeddings2 = represent(img2, model_name='VGG-Face', model=model, enforce_detection=False,
                        detector_backend='mtcnn', align=True, normalization='base')
print(len(embeddings2))
######################## FIND COSINE DISTANCE ########################
r = findCosineDistance(embeddings1, embeddings2)
print("VALUE WHEN COMPARED ALONE WITH IMAGE", r)

# cross-check with scipy's cosine distance
distance_v2 = spatial.distance.cosine(embeddings1, embeddings2)
print("distance_v2", distance_v2)
# euclidean distance as an alternative metric
def findEuclideanDistance(source_representation, test_representation):
    if type(source_representation) == list:
        source_representation = np.array(source_representation)
    if type(test_representation) == list:
        test_representation = np.array(test_representation)
    euclidean_distance = source_representation - test_representation
    euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
    return np.sqrt(euclidean_distance)

euclidean_distance = findEuclideanDistance(embeddings1, embeddings2)
print("euclidean_distance", euclidean_distance)

def l2_normalize(x):
    return x / np.sqrt(np.sum(np.multiply(x, x)))
# find euclidean-L2 (euclidean distance between L2-normalized embeddings),
# as it is often a better measure of accuracy
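# l2_normalize() above is defined but never used in the paste; a minimal sketch
# of the intended euclidean-L2 comparison on the embeddings computed earlier:
euclidean_l2 = findEuclideanDistance(l2_normalize(embeddings1), l2_normalize(embeddings2))
print("euclidean_l2", euclidean_l2)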
detections = detector.detect_faces(img)
print("detections:", detections)

h_ex, w_ex, _ = img.shape   # img.shape is (height, width, channels)

for detection in detections:
    if detection['confidence'] > 0.0:
        x, y, w, h = detection["box"]
        face_area_db_img = w * h
        # crop the detected face and embed it (the start of this represent()
        # call was lost in the paste; reconstructed from its trailing arguments)
        detected_face = img[y:y + h, x:x + w]
        face_embedding = represent(detected_face, model_name='VGG-Face', model=model,
                                   enforce_detection=False,
                                   detector_backend='mtcnn', align=True, normalization='VGG-Face')
        result = findCosineDistance(face_embedding, embeddings2)
        if result < 0.40:
            detected_face = cv2.resize(detected_face, (200, 200))
            img_small = cv2.resize(img, (200, 200))   # keep img intact for later crops
            final_result = cv2.hconcat([detected_face, img_small])
            cv2.imshow("img", final_result)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                break
# compare against the embeddings cached in the pickle file
final_result = []
address_image = []
probable = []
with open("representation_ATS.pkl", 'rb') as file:
    data = pickle.load(file)

for d in data:
    print(d)
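    # The paste lost the loop body that fills probable/address_image; a minimal
    # sketch, assuming each stored entry is an (embedding, image_path) pair:
    dist = findCosineDistance(embeddings2, d[0])
    if dist < 0.40:
        probable.append(dist)
        address_image.append(d[1])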
combined = list(zip(probable, address_image))
print("< 0.40 match", combined)

best_candidate = min(combined)   # smallest cosine distance = closest match
print(best_candidate)
final = cv2.imread(best_candidate[1])
final = cv2.resize(final, (200, 200))
cv2.imshow("bestmatch", final)
cv2.waitKey(0)
```
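For reference, DeepFace ships a tuned threshold per model and distance metric, and `DeepFace.verify` applies it with consistent detection, alignment, and normalization on both images. A minimal sanity check along those lines (reusing the image paths from the script above):

```python
from deepface import DeepFace
from deepface.commons import distance as dst

# the threshold DeepFace itself uses for VGG-Face with cosine distance (0.40)
threshold = dst.findThreshold('VGG-Face', 'cosine')
print("tuned threshold:", threshold)

# verify() detects, aligns, normalizes and thresholds both images the same way
result = DeepFace.verify('C:/Users/Risin/Desktop/id_photo/omkar.jpg',
                         'C:/Users/Risin/Desktop/FR_ats/Students/Student/1011/1017/Achyatanandmadhukar_102_1.jpg',
                         model_name='VGG-Face',
                         distance_metric='cosine',
                         detector_backend='mtcnn')
print(result)   # dict with 'verified', 'distance' and the applied threshold
```

If `verify` also reports this pair as a match, the false positive is a genuine model limitation rather than a pipeline bug; if it does not, the mismatched `normalization` arguments in the script above are the first thing to rule out.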