From b452bd26ac20b3f0226c36dfcb02dff0bc831817 Mon Sep 17 00:00:00 2001
From: Vincent STRAGIER
Date: Tue, 23 May 2023 19:18:38 +0200
Subject: [PATCH] Add YOLOv8n-face, with confidence

---
 deepface/detectors/FaceDetector.py       |  9 +++--
 deepface/detectors/OpenCvWrapper.py      | 21 +++++++-----
 deepface/detectors/Yolov8nfaceWrapper.py | 42 ++++++++++++++++++++++++
 3 files changed, 58 insertions(+), 14 deletions(-)
 create mode 100644 deepface/detectors/Yolov8nfaceWrapper.py

diff --git a/deepface/detectors/FaceDetector.py b/deepface/detectors/FaceDetector.py
index 367f588..3b02533 100644
--- a/deepface/detectors/FaceDetector.py
+++ b/deepface/detectors/FaceDetector.py
@@ -9,11 +9,11 @@ from deepface.detectors import (
     MtcnnWrapper,
     RetinaFaceWrapper,
     MediapipeWrapper,
+    Yolov8nfaceWrapper,
 )
 
 
 def build_model(detector_backend):
-
     global face_detector_obj  # singleton design pattern
 
     backends = {
@@ -23,6 +23,7 @@ def build_model(detector_backend):
         "mtcnn": MtcnnWrapper.build_model,
         "retinaface": RetinaFaceWrapper.build_model,
         "mediapipe": MediapipeWrapper.build_model,
+        "yolov8n-face": Yolov8nfaceWrapper.build_model,
     }
 
     if not "face_detector_obj" in globals():
@@ -42,7 +43,6 @@ def build_model(detector_backend):
 
 
 def detect_face(face_detector, detector_backend, img, align=True):
-
     obj = detect_faces(face_detector, detector_backend, img, align)
 
     if len(obj) > 0:
@@ -50,12 +50,12 @@ def detect_face(face_detector, detector_backend, img, align=True):
     else:  # len(obj) == 0
         face = None
         region = [0, 0, img.shape[1], img.shape[0]]
+        confidence = 0
 
     return face, region, confidence
 
 
 def detect_faces(face_detector, detector_backend, img, align=True):
-
     backends = {
         "opencv": OpenCvWrapper.detect_face,
         "ssd": SsdWrapper.detect_face,
@@ -63,6 +63,7 @@ def detect_faces(face_detector, detector_backend, img, align=True):
         "mtcnn": MtcnnWrapper.detect_face,
         "retinaface": RetinaFaceWrapper.detect_face,
         "mediapipe": MediapipeWrapper.detect_face,
+        "yolov8n-face": Yolov8nfaceWrapper.detect_face,
     }
 
     detect_face_fn = backends.get(detector_backend)
@@ -76,7 +77,6 @@ def detect_faces(face_detector, detector_backend, img, align=True):
 
 
 def alignment_procedure(img, left_eye, right_eye):
-
     # this function aligns given face in img based on left and right eye coordinates
 
     left_eye_x, left_eye_y = left_eye
@@ -104,7 +104,6 @@ def alignment_procedure(img, left_eye, right_eye):
     # apply cosine rule
 
     if b != 0 and c != 0:  # this multiplication causes division by zero in cos_a calculation
-
         cos_a = (b * b + c * c - a * a) / (2 * b * c)
         angle = np.arccos(cos_a)  # angle in radian
         angle = (angle * 180) / math.pi  # radian to degree
diff --git a/deepface/detectors/OpenCvWrapper.py b/deepface/detectors/OpenCvWrapper.py
index cd0fc95..8a8025b 100644
--- a/deepface/detectors/OpenCvWrapper.py
+++ b/deepface/detectors/OpenCvWrapper.py
@@ -45,6 +45,7 @@ def detect_face(detector, img, align=True):
     img_region = [0, 0, img.shape[1], img.shape[0]]
 
     faces = []
+    scores = []
     try:
         # faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
 
         faces, _, scores = detector["face_detector"].detectMultiScale3(
             img, 1.1, 10, outputRejectLevels=True
         )
-    except:
-        pass
+    except Exception:  # pylint: disable=broad-except
+        # except alone is too broad and will catch keyboard interrupts
+        import traceback
 
-    if len(faces) > 0:
-        for (x, y, w, h), confidence in zip(faces, scores):
-            detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
+        print(traceback.format_exc())
 
-            if align:
-                detected_face = align_face(detector["eye_detector"], detected_face)
+    for (x, y, w, h), confidence in zip(faces, scores):
+        detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
 
-            img_region = [x, y, w, h]
+        if align:
+            detected_face = align_face(detector["eye_detector"], detected_face)
 
-            resp.append((detected_face, img_region, confidence))
+        img_region = [x, y, w, h]
+
+        resp.append((detected_face, img_region, confidence))
 
     return resp
diff --git a/deepface/detectors/Yolov8nfaceWrapper.py b/deepface/detectors/Yolov8nfaceWrapper.py
new file mode 100644
index 0000000..3843e98
--- /dev/null
+++ b/deepface/detectors/Yolov8nfaceWrapper.py
@@ -0,0 +1,42 @@
+def build_model():
+    import gdown
+    import os
+
+    from ultralytics import YOLO
+
+    from deepface.commons.functions import get_deepface_home
+
+    weights_path = f"{get_deepface_home()}/.deepface/weights/yolov8n-face.pt"
+
+    if not os.path.isfile(weights_path):
+        url = "https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb"
+        gdown.download(url, weights_path, quiet=False)
+        print("Downloaded YOLO model yolov8n-face.pt")
+
+    # return face_detector
+    return YOLO(weights_path)
+
+
+def detect_face(face_detector, img, align=False):
+    resp = []
+    confidence = -1
+    detected_face = None
+
+    # if align:
+    #     raise NotImplementedError("`align` is not implemented for Yolov8Wrapper")
+
+    results = face_detector.predict(img, verbose=False, show=True, conf=0.25)[0]
+
+    for result in results:
+        x, y, w, h = result.boxes.xywh.tolist()[0]
+        confidence = result.boxes.conf.tolist()[0]
+        # print(f"Confidence: {confidence}, x: {x}, y: {y}, w: {w}, h: {h}")
+
+        # print landmarks
+        print(result.keypoints.tolist())
+        # change to top left corner, width, height
+        x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h)
+        detected_face = img[y : y + h, x : x + w].copy()
+        resp.append((detected_face, [x, y, w, h], confidence))
+
+    return resp
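
For quick local testing, here is a minimal sketch (not part of the commit) that drives the new backend through the FaceDetector module shown in the diff above; the sample image path is a placeholder, and build_model/detect_faces are the functions this patch wires up.

    # Hypothetical smoke test: exercise the new "yolov8n-face" backend
    # through deepface.detectors.FaceDetector (functions shown in the patch).
    import cv2

    from deepface.detectors import FaceDetector

    # placeholder image path; any BGR numpy array works
    img = cv2.imread("tests/dataset/img1.jpg")

    face_detector = FaceDetector.build_model("yolov8n-face")

    # detect_faces returns a list of (detected_face, [x, y, w, h], confidence) tuples
    detections = FaceDetector.detect_faces(face_detector, "yolov8n-face", img, align=False)

    for detected_face, region, confidence in detections:
        print(region, confidence, detected_face.shape)

Calling the public entry points (e.g. DeepFace.verify or DeepFace.extract_faces) with detector_backend="yolov8n-face" should behave the same way, assuming they simply forward the backend name to this module.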