From 049ab8cdffaa560e2ad1b539ceb577c8b6ef525c Mon Sep 17 00:00:00 2001 From: Sefik Ilkin Serengil Date: Mon, 11 Mar 2024 18:29:52 +0000 Subject: [PATCH 1/4] resolving bug 1096 --- deepface/detectors/YuNet.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deepface/detectors/YuNet.py b/deepface/detectors/YuNet.py index c30a8ac..cd2f1b6 100644 --- a/deepface/detectors/YuNet.py +++ b/deepface/detectors/YuNet.py @@ -22,8 +22,15 @@ class YuNetClient(Detector): """ opencv_version = cv2.__version__.split(".") + if not len(opencv_version) >= 2: + raise ValueError( + f"OpenCV's version must have major and minor values but it is {opencv_version}" + ) - if len(opencv_version) > 2 and int(opencv_version[0]) == 4 and int(opencv_version[1]) < 8: + opencv_version_major = int(opencv_version[0]) + opencv_version_minor = int(opencv_version[1]) + + if opencv_version_major < 4 or (opencv_version_major == 4 and opencv_version_minor < 8): # min requirement: https://github.com/opencv/opencv_zoo/issues/172 raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}") From 9705e6f4eff3b38ef5acdaa3ac7811b137189ac7 Mon Sep 17 00:00:00 2001 From: Sefik Ilkin Serengil Date: Mon, 11 Mar 2024 18:31:52 +0000 Subject: [PATCH 2/4] resolving issue 1097 --- deepface/detectors/YuNet.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deepface/detectors/YuNet.py b/deepface/detectors/YuNet.py index cd2f1b6..95baa87 100644 --- a/deepface/detectors/YuNet.py +++ b/deepface/detectors/YuNet.py @@ -74,9 +74,9 @@ class YuNetClient(Detector): # resize image if it is too large (Yunet fails to detect faces on large input sometimes) # I picked 640 as a threshold because it is the default value of max_size in Yunet.
resized = False + r = 1 # resize factor if height > 640 or width > 640: r = 640.0 / max(height, width) - original_image = img.copy() img = cv2.resize(img, (int(width * r), int(height * r))) height, width = img.shape[0], img.shape[1] resized = True @@ -109,7 +109,6 @@ class YuNetClient(Detector): x = max(x, 0) y = max(y, 0) if resized: - img = original_image x, y, w, h = int(x / r), int(y / r), int(w / r), int(h / r) x_re, y_re, x_le, y_le = ( int(x_re / r), From ec4c3cd17c5f2607ee6d00723ef759ccdc7f062b Mon Sep 17 00:00:00 2001 From: Sefik Ilkin Serengil Date: Mon, 11 Mar 2024 18:36:30 +0000 Subject: [PATCH 3/4] resolving issue 1099 --- deepface/detectors/YuNet.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/deepface/detectors/YuNet.py b/deepface/detectors/YuNet.py index 95baa87..ec773ad 100644 --- a/deepface/detectors/YuNet.py +++ b/deepface/detectors/YuNet.py @@ -100,12 +100,9 @@ class YuNetClient(Detector): left eye, nose tip, the right corner and left corner of the mouth respectively. """ (x, y, w, h, x_re, y_re, x_le, y_le) = list(map(int, face[:8])) - left_eye = (x_re, y_re) - right_eye = (x_le, y_le) - # Yunet returns negative coordinates if it thinks part of - # the detected face is outside the frame. - # We set the coordinate to 0 if they are negative. + # YuNet returns negative coordinates if it thinks part of the detected face + # is outside the frame. 
x = max(x, 0) y = max(y, 0) if resized: @@ -124,8 +121,8 @@ class YuNetClient(Detector): w=w, h=h, confidence=confidence, - left_eye=left_eye, - right_eye=right_eye, + left_eye=(x_re, y_re), + right_eye=(x_le, y_le), ) resp.append(facial_area) return resp From 820ba5946b08ba9ec35bb6e06f0361432d0c8d1f Mon Sep 17 00:00:00 2001 From: Sefik Ilkin Serengil Date: Mon, 11 Mar 2024 18:39:00 +0000 Subject: [PATCH 4/4] resolving issue 1098 --- deepface/modules/verification.py | 65 +++++++++++++++++--------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/deepface/modules/verification.py b/deepface/modules/verification.py index 097a5bf..ae957db 100644 --- a/deepface/modules/verification.py +++ b/deepface/modules/verification.py @@ -93,6 +93,7 @@ def verify( model: FacialRecognition = modeling.build_model(model_name) dims = model.output_shape + # extract faces from img1 if isinstance(img1_path, list): # given image is already pre-calculated embedding if not all(isinstance(dim, float) for dim in img1_path): @@ -115,16 +116,20 @@ def verify( img1_embeddings = [img1_path] img1_facial_areas = [None] else: - img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings( - img_path=img1_path, - model_name=model_name, - detector_backend=detector_backend, - enforce_detection=enforce_detection, - align=align, - expand_percentage=expand_percentage, - normalization=normalization, - ) + try: + img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings( + img_path=img1_path, + model_name=model_name, + detector_backend=detector_backend, + enforce_detection=enforce_detection, + align=align, + expand_percentage=expand_percentage, + normalization=normalization, + ) + except ValueError as err: + raise ValueError("Exception while processing img1_path") from err + # extract faces from img2 if isinstance(img2_path, list): # given image is already pre-calculated embedding if not all(isinstance(dim, float) for dim in img2_path): @@ -147,15 +152,18 @@ def verify( 
img2_embeddings = [img2_path] img2_facial_areas = [None] else: - img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings( - img_path=img2_path, - model_name=model_name, - detector_backend=detector_backend, - enforce_detection=enforce_detection, - align=align, - expand_percentage=expand_percentage, - normalization=normalization, - ) + try: + img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings( + img_path=img2_path, + model_name=model_name, + detector_backend=detector_backend, + enforce_detection=enforce_detection, + align=align, + expand_percentage=expand_percentage, + normalization=normalization, + ) + except ValueError as err: + raise ValueError("Exception while processing img2_path") from err no_facial_area = { "x": None, @@ -218,18 +226,15 @@ def __extract_faces_and_embeddings( model: FacialRecognition = modeling.build_model(model_name) target_size = model.input_shape - try: - img_objs = detection.extract_faces( - img_path=img_path, - target_size=target_size, - detector_backend=detector_backend, - grayscale=False, - enforce_detection=enforce_detection, - align=align, - expand_percentage=expand_percentage, - ) - except ValueError as err: - raise ValueError("Exception while processing img1_path") from err + img_objs = detection.extract_faces( + img_path=img_path, + target_size=target_size, + detector_backend=detector_backend, + grayscale=False, + enforce_detection=enforce_detection, + align=align, + expand_percentage=expand_percentage, + ) # find embeddings for each face for img_obj in img_objs: