All available YOLOv8-face models

Vincent STRAGIER 2023-05-23 23:00:49 +02:00
parent b08695ac39
commit 659633f286
3 changed files with 71 additions and 49 deletions

View File

@@ -9,7 +9,7 @@ from deepface.detectors import (
     MtcnnWrapper,
     RetinaFaceWrapper,
     MediapipeWrapper,
-    Yolov8nfaceWrapper,
+    Yolov8faceWrapper,
 )
@@ -23,7 +23,9 @@ def build_model(detector_backend):
         "mtcnn": MtcnnWrapper.build_model,
         "retinaface": RetinaFaceWrapper.build_model,
         "mediapipe": MediapipeWrapper.build_model,
-        "yolov8n-face": Yolov8nfaceWrapper.build_model,
+        "yolov8-lite-t": Yolov8faceWrapper.build_model("yolov8-lite-t"),
+        "yolov8-lite-s": Yolov8faceWrapper.build_model("yolov8-lite-s"),
+        "yolov8n": Yolov8faceWrapper.build_model("yolov8n"),
     }

     if not "face_detector_obj" in globals():
@@ -63,7 +65,9 @@ def detect_faces(face_detector, detector_backend, img, align=True):
         "mtcnn": MtcnnWrapper.detect_face,
         "retinaface": RetinaFaceWrapper.detect_face,
         "mediapipe": MediapipeWrapper.detect_face,
-        "yolov8n-face": Yolov8nfaceWrapper.detect_face,
+        "yolov8-lite-t": Yolov8faceWrapper.detect_face,
+        "yolov8-lite-s": Yolov8faceWrapper.detect_face,
+        "yolov8n": Yolov8faceWrapper.detect_face,
     }

     detect_face_fn = backends.get(detector_backend)

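The dispatcher change above replaces the single "yolov8n-face" key with three YOLOv8-face variants, each built through the new Yolov8faceWrapper factory. A minimal usage sketch (not part of this commit) of selecting one of the new keys through the dispatcher; the image loading and file name are illustrative, and it assumes detect_faces forwards the (face, region, confidence) tuples produced by the wrapper's detect_face:

import cv2  # illustrative only; any BGR image array works
from deepface.detectors import FaceDetector

img = cv2.imread("example.jpg")  # hypothetical input image
face_detector = FaceDetector.build_model("yolov8n")  # or "yolov8-lite-t" / "yolov8-lite-s"
faces = FaceDetector.detect_faces(face_detector, "yolov8n", img, align=True)
for detected_face, (x, y, w, h), confidence in faces:
    print(x, y, w, h, confidence)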
View File

@@ -0,0 +1,64 @@
from deepface.detectors import FaceDetector

PATHS = {
    "yolov8-lite-t": "/.deepface/weights/yolov8-lite-t.pt",
    "yolov8-lite-s": "/.deepface/weights/yolov8-lite-s.pt",
    "yolov8n": "/.deepface/weights/yolov8n-face.pt",
}

BASE_URL = "https://drive.google.com/uc?id="

IDS = {
    "yolov8-lite-t": "1vFMGW8xtRVo9bfC9yJVWWGY7vVxbLh94",
    "yolov8-lite-s": "1ckpBT8KfwURTvTm5pa-cMC89A0V5jbaq",
    "yolov8n": "1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb",
}


def build_model(model: str):
    """Function factory for YOLO models"""
    from deepface.commons.functions import get_deepface_home

    func_weights_path = f"{get_deepface_home()}{PATHS[model]}"
    func_url = f"{BASE_URL}{IDS[model]}"

    def _build_model(weights_path: str = func_weights_path, url: str = func_url):
        import gdown
        import os

        from ultralytics import YOLO

        if not os.path.isfile(weights_path):
            gdown.download(url, weights_path, quiet=False)
            print(f"Downloaded YOLO model {os.path.basename(PATHS[model])}")

        # return face_detector
        return YOLO(weights_path)

    return _build_model


def detect_face(face_detector, img, align=False):
    resp = []

    results = face_detector.predict(img, verbose=False, show=True, conf=0.25)[0]

    for result in results:
        x, y, w, h = result.boxes.xywh.tolist()[0]
        confidence = result.boxes.conf.tolist()[0]

        x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h)
        detected_face = img[y : y + h, x : x + w].copy()

        if align:
            # Extract landmarks
            left_eye, right_eye, _, _, _ = result.keypoints.tolist()
            # Check the landmarks confidence before alignment
            if left_eye[2] > 0.5 and right_eye[2] > 0.5:
                detected_face = FaceDetector.alignment_procedure(
                    detected_face, left_eye[:2], right_eye[:2]
                )

        resp.append((detected_face, [x, y, w, h], confidence))

    return resp

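Note how the new wrapper plugs into the dispatcher: Yolov8faceWrapper.build_model(model) is evaluated once while the backends dict is constructed and returns the zero-argument _build_model closure, with that variant's weights path and Google Drive ID already bound in; FaceDetector then calls the closure, which downloads the weights under {get_deepface_home()}/.deepface/weights/ on first use and loads them with ultralytics' YOLO. A small sketch of that two-step call, using the wrapper directly (normally FaceDetector does this for you):

from deepface.detectors import Yolov8faceWrapper

# Step 1: the factory call performed when the backends dict is built;
# it only binds the variant-specific path and URL and returns _build_model.
builder = Yolov8faceWrapper.build_model("yolov8-lite-s")

# Step 2: the call FaceDetector makes later; downloads the weights on the
# first run, then returns the loaded YOLO model.
face_detector = builder()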
View File

@@ -1,46 +0,0 @@
from deepface.detectors import FaceDetector


def build_model():
    import gdown
    import os

    from ultralytics import YOLO

    from deepface.commons.functions import get_deepface_home

    weights_path = f"{get_deepface_home()}/.deepface/weights/yolov8n-face.pt"

    if not os.path.isfile(weights_path):
        url = "https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb"
        gdown.download(url, weights_path, quiet=False)
        print("Downloaded YOLO model yolo8vn-face.pt")

    # return face_detector
    return YOLO(weights_path)


def detect_face(face_detector, img, align=False):
    resp = []

    results = face_detector.predict(img, verbose=False, show=True, conf=0.25)[0]

    for result in results:
        x, y, w, h = result.boxes.xywh.tolist()[0]
        confidence = result.boxes.conf.tolist()[0]

        x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h)
        detected_face = img[y : y + h, x : x + w].copy()

        if align:
            # Extract landmarks
            left_eye, right_eye, _, _, _ = result.keypoints.tolist()
            # Check the landmarks confidence before alignment
            if left_eye[2] > 0.5 and right_eye[2] > 0.5:
                detected_face = FaceDetector.alignment_procedure(
                    detected_face, left_eye[:2], right_eye[:2]
                )

        resp.append((detected_face, [x, y, w, h], confidence))

    return resp