mirror of https://github.com/serengil/deepface.git
synced 2025-06-07 12:05:22 +00:00

Merge pull request #1103 from serengil/feat-task-1103-bugs-and-enhancements

Feat task 1103 bugs and enhancements

This commit is contained in: commit bef2cc9151
@@ -22,8 +22,15 @@ class YuNetClient(Detector):
         """
 
         opencv_version = cv2.__version__.split(".")
+        if not len(opencv_version) >= 2:
+            raise ValueError(
+                f"OpenCv's version must have major and minor values but it is {opencv_version}"
+            )
 
-        if len(opencv_version) > 2 and int(opencv_version[0]) == 4 and int(opencv_version[1]) < 8:
+        opencv_version_major = int(opencv_version[0])
+        opencv_version_minor = int(opencv_version[1])
+
+        if opencv_version_major < 4 or (opencv_version_major == 4 and opencv_version_minor < 8):
             # min requirement: https://github.com/opencv/opencv_zoo/issues/172
             raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}")
 
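The old guard only fired when the version string had more than two components and the major version was exactly 4, so OpenCV 3.x (and short version strings) slipped through. The new code first validates that the string has major and minor parts, then rejects anything below 4.8. A minimal standalone sketch of the new check, using a hypothetical helper name (the diff inlines this logic in YuNetClient itself):

    import cv2

    def check_opencv_for_yunet() -> None:
        # hypothetical helper; the diff inlines this in YuNetClient's constructor
        opencv_version = cv2.__version__.split(".")
        if not len(opencv_version) >= 2:
            raise ValueError(
                f"OpenCv's version must have major and minor values but it is {opencv_version}"
            )
        major, minor = int(opencv_version[0]), int(opencv_version[1])
        # min requirement: https://github.com/opencv/opencv_zoo/issues/172
        if major < 4 or (major == 4 and minor < 8):
            raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}")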
@@ -67,9 +74,9 @@ class YuNetClient(Detector):
         # resize image if it is too large (Yunet fails to detect faces on large input sometimes)
         # I picked 640 as a threshold because it is the default value of max_size in Yunet.
         resized = False
         r = 1  # resize factor
         if height > 640 or width > 640:
             r = 640.0 / max(height, width)
             original_image = img.copy()
             img = cv2.resize(img, (int(width * r), int(height * r)))
             height, width = img.shape[0], img.shape[1]
             resized = True
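The detector caps inputs at 640 px on the longest side, keeps the scale factor r and a copy of the original image, and later divides every reported coordinate by r. A minimal sketch of that bookkeeping under the same 640 threshold, with a hypothetical function name:

    import cv2
    import numpy as np

    def shrink_for_detection(img: np.ndarray, max_size: int = 640):
        # hypothetical illustration of the pattern above: downscale large inputs,
        # remember the factor r, and divide detector outputs by r afterwards
        height, width = img.shape[0], img.shape[1]
        r = 1.0
        if height > max_size or width > max_size:
            r = float(max_size) / max(height, width)
            img = cv2.resize(img, (int(width * r), int(height * r)))
        return img, r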
@@ -93,16 +100,12 @@ class YuNetClient(Detector):
             left eye, nose tip, the right corner and left corner of the mouth respectively.
             """
             (x, y, w, h, x_re, y_re, x_le, y_le) = list(map(int, face[:8]))
-            left_eye = (x_re, y_re)
-            right_eye = (x_le, y_le)
-
-            # Yunet returns negative coordinates if it thinks part of
-            # the detected face is outside the frame.
-            # We set the coordinate to 0 if they are negative.
+            # YuNet returns negative coordinates if it thinks part of the detected face
+            # is outside the frame.
             x = max(x, 0)
             y = max(y, 0)
             if resized:
                 img = original_image
                 x, y, w, h = int(x / r), int(y / r), int(w / r), int(h / r)
                 x_re, y_re, x_le, y_le = (
                     int(x_re / r),
@@ -118,8 +121,8 @@ class YuNetClient(Detector):
                 w=w,
                 h=h,
                 confidence=confidence,
-                left_eye=left_eye,
-                right_eye=right_eye,
+                left_eye=(x_re, y_re),
+                right_eye=(x_le, y_le),
             )
             resp.append(facial_area)
         return resp
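These two hunks fix a scaling bug: the old code captured the left_eye and right_eye tuples before the "if resized:" block divided the landmark coordinates by r, so on downscaled inputs the eyes were reported in resized-image coordinates. Building the tuples from the already-rescaled values fixes that. Hypothetical numbers showing the effect:

    # hypothetical numbers illustrating the off-by-scale bug the hunks above fix
    r = 0.5                          # image was downscaled by half before detection
    x_re, y_re = 100, 80             # right-eye landmark in the resized image

    stale = (x_re, y_re)             # old code: tuple captured before rescaling
    x_re, y_re = int(x_re / r), int(y_re / r)

    print(stale)                     # (100, 80)  -> wrong for the original image
    print((x_re, y_re))              # (200, 160) -> what the new code reports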
@@ -93,6 +93,7 @@ def verify(
     model: FacialRecognition = modeling.build_model(model_name)
+    dims = model.output_shape
 
     # extract faces from img1
     if isinstance(img1_path, list):
         # given image is already pre-calculated embedding
         if not all(isinstance(dim, float) for dim in img1_path):
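dims records the embedding length of the chosen model; presumably it lets verify() validate a caller-supplied pre-computed embedding (the list-of-floats branch above) against the model's output size. A hypothetical illustration of that kind of check, not the exact code from the PR:

    dims = 4096                    # e.g. a model whose output_shape is 4096
    embedding = [0.1, 0.2, 0.3]    # caller-supplied pre-computed embedding
    if len(embedding) != dims:
        raise ValueError(f"embedding has {len(embedding)} dimensions, expected {dims}")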
@@ -115,16 +116,20 @@ def verify(
         img1_embeddings = [img1_path]
         img1_facial_areas = [None]
     else:
-        img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings(
-            img_path=img1_path,
-            model_name=model_name,
-            detector_backend=detector_backend,
-            enforce_detection=enforce_detection,
-            align=align,
-            expand_percentage=expand_percentage,
-            normalization=normalization,
-        )
+        try:
+            img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings(
+                img_path=img1_path,
+                model_name=model_name,
+                detector_backend=detector_backend,
+                enforce_detection=enforce_detection,
+                align=align,
+                expand_percentage=expand_percentage,
+                normalization=normalization,
+            )
+        except ValueError as err:
+            raise ValueError("Exception while processing img1_path") from err
 
     # extract faces from img2
     if isinstance(img2_path, list):
         # given image is already pre-calculated embedding
         if not all(isinstance(dim, float) for dim in img2_path):
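Wrapping the call at the call site and re-raising with "from err" keeps the detector's original error in the traceback chain while labelling which argument failed. A self-contained sketch of the pattern, with a hypothetical stand-in for __extract_faces_and_embeddings:

    import traceback

    def extract(path: str):
        # hypothetical stand-in for __extract_faces_and_embeddings
        raise ValueError(f"Face could not be detected in {path}")

    try:
        try:
            extract("img1.jpg")
        except ValueError as err:
            raise ValueError("Exception while processing img1_path") from err
    except ValueError:
        traceback.print_exc()  # prints both errors, linked by "direct cause"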
@@ -147,15 +152,18 @@ def verify(
         img2_embeddings = [img2_path]
         img2_facial_areas = [None]
     else:
-        img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings(
-            img_path=img2_path,
-            model_name=model_name,
-            detector_backend=detector_backend,
-            enforce_detection=enforce_detection,
-            align=align,
-            expand_percentage=expand_percentage,
-            normalization=normalization,
-        )
+        try:
+            img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings(
+                img_path=img2_path,
+                model_name=model_name,
+                detector_backend=detector_backend,
+                enforce_detection=enforce_detection,
+                align=align,
+                expand_percentage=expand_percentage,
+                normalization=normalization,
+            )
+        except ValueError as err:
+            raise ValueError("Exception while processing img2_path") from err
 
     no_facial_area = {
         "x": None,
@@ -218,18 +226,15 @@ def __extract_faces_and_embeddings(
     model: FacialRecognition = modeling.build_model(model_name)
     target_size = model.input_shape
 
-    try:
-        img_objs = detection.extract_faces(
-            img_path=img_path,
-            target_size=target_size,
-            detector_backend=detector_backend,
-            grayscale=False,
-            enforce_detection=enforce_detection,
-            align=align,
-            expand_percentage=expand_percentage,
-        )
-    except ValueError as err:
-        raise ValueError("Exception while processing img1_path") from err
+    img_objs = detection.extract_faces(
+        img_path=img_path,
+        target_size=target_size,
+        detector_backend=detector_backend,
+        grayscale=False,
+        enforce_detection=enforce_detection,
+        align=align,
+        expand_percentage=expand_percentage,
+    )
 
     # find embeddings for each face
    for img_obj in img_objs:
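The try/except removed here hard-coded "img1_path" into its message, so a detection failure on the second image was reported as a problem with the first. Moving the wrapping up into verify(), as in the hunks above, lets each call site name the argument that actually failed.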