diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index 6813691..0f41a4e 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -48,17 +48,17 @@ if tf_version == 2:
 folder_utils.initialize_folder()
 
 
-def build_model(model_name: str) -> Any:
+def build_model(task: str, model_name: str) -> Any:
     """
     This function builds a deepface model
     Args:
-        model_name (string): face recognition or facial attribute model
+        model_name (string): face recognition, facial attribute or face detector model
             VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition
             Age, Gender, Emotion, Race for facial attributes
     Returns:
         built_model
     """
-    return modeling.build_model(model_name=model_name)
+    return modeling.build_model(task=task, model_name=model_name)
 
 
 def verify(
diff --git a/deepface/detectors/DetectorWrapper.py b/deepface/detectors/DetectorWrapper.py
index 75f82a6..79ab72a 100644
--- a/deepface/detectors/DetectorWrapper.py
+++ b/deepface/detectors/DetectorWrapper.py
@@ -1,64 +1,13 @@
-from typing import Any, List, Tuple
+from typing import List, Tuple
 import numpy as np
 import cv2
-from deepface.modules import detection
+from deepface.modules import detection, modeling
 from deepface.models.Detector import Detector, DetectedFace, FacialAreaRegion
-from deepface.detectors import (
-    FastMtCnn,
-    MediaPipe,
-    MtCnn,
-    OpenCv,
-    Dlib,
-    RetinaFace,
-    Ssd,
-    Yolo,
-    YuNet,
-    CenterFace,
-)
 from deepface.commons.logger import Logger
 
 logger = Logger()
 
 
-def build_model(detector_backend: str) -> Any:
-    """
-    Build a face detector model
-    Args:
-        detector_backend (str): backend detector name
-    Returns:
-        built detector (Any)
-    """
-    global face_detector_obj  # singleton design pattern
-
-    backends = {
-        "opencv": OpenCv.OpenCvClient,
-        "mtcnn": MtCnn.MtCnnClient,
-        "ssd": Ssd.SsdClient,
-        "dlib": Dlib.DlibClient,
-        "retinaface": RetinaFace.RetinaFaceClient,
-        "mediapipe": MediaPipe.MediaPipeClient,
-        "yolov8": Yolo.YoloClient,
-        "yunet": YuNet.YuNetClient,
-        "fastmtcnn": FastMtCnn.FastMtCnnClient,
-        "centerface": CenterFace.CenterFaceClient,
-    }
-
-    if not "face_detector_obj" in globals():
-        face_detector_obj = {}
-
-    built_models = list(face_detector_obj.keys())
-    if detector_backend not in built_models:
-        face_detector = backends.get(detector_backend)
-
-        if face_detector:
-            face_detector = face_detector()
-            face_detector_obj[detector_backend] = face_detector
-        else:
-            raise ValueError("invalid detector_backend passed - " + detector_backend)
-
-    return face_detector_obj[detector_backend]
-
-
 def detect_faces(
     detector_backend: str, img: np.ndarray, align: bool = True, expand_percentage: int = 0
 ) -> List[DetectedFace]:
@@ -87,7 +36,9 @@ def detect_faces(
     """
     height, width, _ = img.shape
 
-    face_detector: Detector = build_model(detector_backend)
+    face_detector: Detector = modeling.build_model(
+        task="face_detector", model_name=detector_backend
+    )
 
     # validate expand percentage score
     if expand_percentage < 0:
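
With the detector registry folded into modeling.build_model, DetectorWrapper resolves its backend through the same cache as every other model. A minimal sketch of the new call path, assuming an OpenCV-readable image on disk (the file name is hypothetical, and DetectedFace is assumed to expose facial_area and confidence fields as in the rest of the codebase):

    import cv2
    from deepface.detectors import DetectorWrapper

    img = cv2.imread("face.jpg")  # hypothetical sample image (BGR, as loaded by OpenCV)

    # detect_faces now fetches its cached detector via
    # modeling.build_model(task="face_detector", model_name="opencv") internally.
    faces = DetectorWrapper.detect_faces(detector_backend="opencv", img=img)
    for face in faces:
        print(face.facial_area, face.confidence)
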
obj["dominant_emotion"] = Emotion.labels[np.argmax(emotion_predictions)] elif action == "age": - apparent_age = modeling.build_model("Age").predict(img_content) + apparent_age = modeling.build_model( + task="facial_attribute", model_name="Age" + ).predict(img_content) # int cast is for exception - object of type 'float32' is not JSON serializable obj["age"] = int(apparent_age) elif action == "gender": - gender_predictions = modeling.build_model("Gender").predict(img_content) + gender_predictions = modeling.build_model( + task="facial_attribute", model_name="Gender" + ).predict(img_content) obj["gender"] = {} for i, gender_label in enumerate(Gender.labels): gender_prediction = 100 * gender_predictions[i] @@ -183,7 +189,9 @@ def analyze( obj["dominant_gender"] = Gender.labels[np.argmax(gender_predictions)] elif action == "race": - race_predictions = modeling.build_model("Race").predict(img_content) + race_predictions = modeling.build_model( + task="facial_attribute", model_name="Race" + ).predict(img_content) sum_of_predictions = race_predictions.sum() obj["race"] = {} diff --git a/deepface/modules/detection.py b/deepface/modules/detection.py index 9edb0f2..f0a3874 100644 --- a/deepface/modules/detection.py +++ b/deepface/modules/detection.py @@ -25,7 +25,7 @@ def extract_faces( align: bool = True, expand_percentage: int = 0, grayscale: bool = False, - color_face: str = 'rgb', + color_face: str = "rgb", normalize_face: bool = True, anti_spoofing: bool = False, ) -> List[Dict[str, Any]]: @@ -126,16 +126,14 @@ def extract_faces( logger.warn("Parameter grayscale is deprecated. Use color_face instead.") current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY) else: - if color_face == 'rgb': + if color_face == "rgb": current_img = current_img[:, :, ::-1] - elif color_face == 'bgr': + elif color_face == "bgr": pass # image is in BGR - elif color_face == 'gray': + elif color_face == "gray": current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY) else: - raise ValueError( - f"The color_face can be rgb, bgr or gray, but it is {color_face}." 
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py
index 60b6a71..250102b 100644
--- a/deepface/modules/modeling.py
+++ b/deepface/modules/modeling.py
@@ -13,51 +13,90 @@ from deepface.basemodels import (
     Facenet,
     GhostFaceNet,
 )
+from deepface.detectors import (
+    FastMtCnn,
+    MediaPipe,
+    MtCnn,
+    OpenCv,
+    Dlib as DlibDetector,
+    RetinaFace,
+    Ssd,
+    Yolo,
+    YuNet,
+    CenterFace,
+)
 from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.spoofmodels import FasNet
 
 
-def build_model(model_name: str) -> Any:
+def build_model(task: str, model_name: str) -> Any:
     """
-    This function builds a deepface model
+    This function loads pre-trained models in a singleton-like way
     Parameters:
-        model_name (string): face recognition or facial attribute model
-            VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition
-            Age, Gender, Emotion, Race for facial attributes
-
+        task (str): facial_recognition, facial_attribute, face_detector, spoofing
+        model_name (str): model identifier
+            - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
+              ArcFace, SFace, GhostFaceNet for face recognition
+            - Age, Gender, Emotion, Race for facial attributes
+            - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yunet,
+              fastmtcnn or centerface for face detectors
+            - Fasnet for spoofing
     Returns:
         built model class
     """
+    tasks = ["facial_recognition", "spoofing", "facial_attribute", "face_detector"]
+
+    if task not in tasks:
+        raise ValueError(f"unimplemented task - {task}")
+
     # singleton design pattern
     global model_obj
 
     models = {
-        "VGG-Face": VGGFace.VggFaceClient,
-        "OpenFace": OpenFace.OpenFaceClient,
-        "Facenet": Facenet.FaceNet128dClient,
-        "Facenet512": Facenet.FaceNet512dClient,
-        "DeepFace": FbDeepFace.DeepFaceClient,
-        "DeepID": DeepID.DeepIdClient,
-        "Dlib": Dlib.DlibClient,
-        "ArcFace": ArcFace.ArcFaceClient,
-        "SFace": SFace.SFaceClient,
-        "GhostFaceNet": GhostFaceNet.GhostFaceNetClient,
-        "Emotion": Emotion.EmotionClient,
-        "Age": Age.ApparentAgeClient,
-        "Gender": Gender.GenderClient,
-        "Race": Race.RaceClient,
-        "Fasnet": FasNet.Fasnet,
+        "facial_recognition": {
+            "VGG-Face": VGGFace.VggFaceClient,
+            "OpenFace": OpenFace.OpenFaceClient,
+            "Facenet": Facenet.FaceNet128dClient,
+            "Facenet512": Facenet.FaceNet512dClient,
+            "DeepFace": FbDeepFace.DeepFaceClient,
+            "DeepID": DeepID.DeepIdClient,
+            "Dlib": Dlib.DlibClient,
+            "ArcFace": ArcFace.ArcFaceClient,
+            "SFace": SFace.SFaceClient,
+            "GhostFaceNet": GhostFaceNet.GhostFaceNetClient,
+        },
+        "spoofing": {
+            "Fasnet": FasNet.Fasnet,
+        },
+        "facial_attribute": {
+            "Emotion": Emotion.EmotionClient,
+            "Age": Age.ApparentAgeClient,
+            "Gender": Gender.GenderClient,
+            "Race": Race.RaceClient,
+        },
+        "face_detector": {
+            "opencv": OpenCv.OpenCvClient,
+            "mtcnn": MtCnn.MtCnnClient,
+            "ssd": Ssd.SsdClient,
+            "dlib": DlibDetector.DlibClient,
+            "retinaface": RetinaFace.RetinaFaceClient,
+            "mediapipe": MediaPipe.MediaPipeClient,
+            "yolov8": Yolo.YoloClient,
+            "yunet": YuNet.YuNetClient,
+            "fastmtcnn": FastMtCnn.FastMtCnnClient,
+            "centerface": CenterFace.CenterFaceClient,
+        },
     }
 
     if not "model_obj" in globals():
-        model_obj = {}
+        model_obj = {current_task: {} for current_task in tasks}
 
-    if not model_name in model_obj.keys():
-        model = models.get(model_name)
+    if not model_name in model_obj[task].keys():
+        model = models[task].get(model_name)
         if model:
-            model_obj[model_name] = model()
+            model_obj[task][model_name] = model()
         else:
             raise ValueError(f"Invalid model_name passed - {model_name}")
 
-    return model_obj[model_name]
+    return model_obj[task][model_name]
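
Keying the cache by task first means the same name can safely appear under two tasks: "Dlib" the recognizer and "dlib" the detector land in separate buckets instead of colliding in one flat dict. A quick sketch of the expected singleton behavior, using only names and errors defined in the hunk above:

    from deepface.modules import modeling

    # First call constructs the client; the second returns the cached instance.
    age_a = modeling.build_model(task="facial_attribute", model_name="Age")
    age_b = modeling.build_model(task="facial_attribute", model_name="Age")
    assert age_a is age_b

    # Unknown tasks and names fail fast.
    try:
        modeling.build_model(task="face_detector", model_name="no-such-backend")
    except ValueError as err:
        print(err)  # Invalid model_name passed - no-such-backend
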
diff --git a/deepface/modules/representation.py b/deepface/modules/representation.py
index 5a1036b..b187ce4 100644
--- a/deepface/modules/representation.py
+++ b/deepface/modules/representation.py
@@ -65,7 +65,9 @@ def represent(
     """
     resp_objs = []
 
-    model: FacialRecognition = modeling.build_model(model_name)
+    model: FacialRecognition = modeling.build_model(
+        task="facial_recognition", model_name=model_name
+    )
 
     # ---------------------------------
     # we have run pre-process in verification. so, this can be skipped if it is coming from verify.
diff --git a/deepface/modules/streaming.py b/deepface/modules/streaming.py
index fe27464..bd77ce1 100644
--- a/deepface/modules/streaming.py
+++ b/deepface/modules/streaming.py
@@ -171,7 +171,7 @@ def build_facial_recognition_model(model_name: str) -> None:
     Returns
         input_shape (tuple): input shape of given facial recognition model.
     """
-    _ = DeepFace.build_model(model_name=model_name)
+    _ = DeepFace.build_model(task="facial_recognition", model_name=model_name)
     logger.info(f"{model_name} is built")
 
 
@@ -267,11 +267,11 @@ def build_demography_models(enable_face_analysis: bool) -> None:
     """
     if enable_face_analysis is False:
         return
-    DeepFace.build_model(model_name="Age")
+    DeepFace.build_model(task="facial_attribute", model_name="Age")
     logger.info("Age model is just built")
-    DeepFace.build_model(model_name="Gender")
+    DeepFace.build_model(task="facial_attribute", model_name="Gender")
     logger.info("Gender model is just built")
-    DeepFace.build_model(model_name="Emotion")
+    DeepFace.build_model(task="facial_attribute", model_name="Emotion")
     logger.info("Emotion model is just built")
 
 
diff --git a/deepface/modules/verification.py b/deepface/modules/verification.py
index 7bd3c6b..69198b8 100644
--- a/deepface/modules/verification.py
+++ b/deepface/modules/verification.py
@@ -100,7 +100,9 @@ def verify(
     tic = time.time()
 
-    model: FacialRecognition = modeling.build_model(model_name)
+    model: FacialRecognition = modeling.build_model(
+        task="facial_recognition", model_name=model_name
+    )
     dims = model.output_shape
 
     # extract faces from img1
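
Taken together, every model type is now requested through the same two-argument entry point. An end-to-end sketch using task and model names from the registry above (each client typically downloads its pre-trained weights on first construction):

    from deepface import DeepFace

    recognizer = DeepFace.build_model(task="facial_recognition", model_name="VGG-Face")
    age_model = DeepFace.build_model(task="facial_attribute", model_name="Age")
    detector = DeepFace.build_model(task="face_detector", model_name="opencv")
    antispoof = DeepFace.build_model(task="spoofing", model_name="Fasnet")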