Closed: segalinc closed this issue 3 years ago
Just an example of how it works:

```python
import numpy as np
import dlib
from face_recognition import api as frapi


class Full_object_detection_masked(dlib.full_object_detection):
    """A full_object_detection whose mask-covered landmarks (lower jaw
    line, lower nose and mouth) are zeroed out."""

    # Index ranges to blank out in the 68-point landmark model:
    # 2-14 lower jaw line, 29-35 lower nose, 48-67 mouth.
    MASKED_RANGES = (range(2, 15), range(29, 36), range(48, 68))

    def part(self, idx: int):
        if any(idx in r for r in self.MASKED_RANGES):
            return dlib.point(0, 0)
        return super().part(idx)

    def parts(self):
        lst = dlib.points()
        for idx in range(68):
            if any(idx in r for r in self.MASKED_RANGES):
                lst.insert(idx, dlib.point(0, 0))
            else:
                old = super().part(idx)
                lst.insert(idx, dlib.point(old.x, old.y))
        return lst


def face_encodings_masked(face_image, known_face_locations=None, num_jitters=1, model="large"):
    """
    Given an image, return the 128-dimension face encoding for each face in the image.

    :param face_image: The image that contains one or more faces
    :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
    :param num_jitters: How many times to re-sample the face when calculating the encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower).
    :param model: Optional - which model to use. "large" (default) or "small", which only returns 5 points but is faster. The masking above assumes the 68-point "large" model.
    :return: A list of 128-dimensional face encodings (one for each face in the image)
    """
    raw_landmarks = frapi._raw_face_landmarks(face_image, known_face_locations, model)
    masked_raw_landmarks = [Full_object_detection_masked(lm.rect, lm.parts())
                            for lm in raw_landmarks]
    return [np.array(frapi.face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters))
            for raw_landmark_set in masked_raw_landmarks]
```
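A quick usage sketch (the image path is a placeholder; `face_recognition.load_image_file` is the library's standard loader, and the 68-point masking requires the default `model="large"`):

```python
import face_recognition

# Load an RGB image and encode faces with the mask-covered landmarks zeroed out.
image = face_recognition.load_image_file("person.jpg")  # placeholder path
encodings = face_encodings_masked(image, num_jitters=1, model="large")
print(len(encodings), "face(s) encoded")
```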
Notice: this issue has been closed because it has been inactive for 45 days. You may reopen this issue if it has been closed in error.
Hi,
I want to use my own landmarks array to extract the face descriptor. How do I convert a np.ndarray of shape (68, 2) to a _dlib_pybind11.full_object_detection?
I was also trying to use dlib.get_face_chip so I could pass in the aligned face directly, but that also wants the landmarks in the dlib format.
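In case it helps, a minimal sketch of that conversion, assuming the (68, 2) array holds (x, y) pixel coordinates in the standard 68-point ordering; `landmarks_to_detection` is a hypothetical helper name, and the bounding-box fallback is only an approximation of the real face rectangle:

```python
import numpy as np
import dlib

def landmarks_to_detection(landmarks: np.ndarray,
                           rect: dlib.rectangle = None) -> dlib.full_object_detection:
    """Convert a (68, 2) array of (x, y) coordinates into a
    dlib.full_object_detection. If no face rectangle is given, fall
    back to the landmarks' bounding box (an approximation)."""
    points = [dlib.point(int(x), int(y)) for x, y in landmarks]
    if rect is None:
        xs, ys = landmarks[:, 0], landmarks[:, 1]
        rect = dlib.rectangle(int(xs.min()), int(ys.min()),
                              int(xs.max()), int(ys.max()))
    return dlib.full_object_detection(rect, points)

# The result can then be passed wherever dlib expects a detection, e.g.:
#   chip = dlib.get_face_chip(img, landmarks_to_detection(my_landmarks))
```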