using type hinting for backend functions

This commit is contained in:
Sefik Ilkin Serengil 2023-12-24 14:38:14 +00:00
parent 58945bd14d
commit f23ab85fd1
29 changed files with 334 additions and 129 deletions

View File

@ -889,8 +889,12 @@ def extract_faces(
@deprecated(version="0.0.78", reason="Use DeepFace.extract_faces instead of DeepFace.detectFace")
def detectFace(
img_path, target_size=(224, 224), detector_backend="opencv", enforce_detection=True, align=True
):
img_path: Union[str, np.ndarray],
target_size: tuple = (224, 224),
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
) -> np.ndarray:
"""
Deprecated function. Use extract_faces for same functionality.
@ -942,7 +946,7 @@ def detectFace(
functions.initialize_folder()
def cli():
def cli() -> None:
"""
command line interface function will be offered in this block
"""

View File

@ -14,8 +14,8 @@ logger = Logger(module="basemodels.ArcFace")
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model
from keras.engine import training
import keras
from keras.layers import (
ZeroPadding2D,
Input,
@ -28,8 +28,8 @@ if tf_version == 1:
Dense,
)
else:
from tensorflow.keras.models import Model
from tensorflow.python.keras.engine import training
from tensorflow import keras
from tensorflow.keras.layers import (
ZeroPadding2D,
Input,
@ -41,15 +41,11 @@ else:
Flatten,
Dense,
)
# --------------------------------
# url = "https://drive.google.com/uc?id=1LVB3CdVejpmGHM28BpqqkbZP5hDEcdZY"
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
):
) -> Model:
base_model = ResNet34()
inputs = base_model.inputs[0]
arcface_model = base_model.outputs[0]
@ -62,7 +58,7 @@ def loadModel(
embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
arcface_model
)
model = keras.models.Model(inputs, embedding, name=base_model.name)
model = Model(inputs, embedding, name=base_model.name)
# ---------------------------------------
# check the availability of pre-trained weights
@ -84,7 +80,7 @@ def loadModel(
return model
def ResNet34():
def ResNet34() -> Model:
img_input = Input(shape=(112, 112, 3))

View File

@ -41,7 +41,7 @@ else:
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
):
) -> Model:
myInput = Input(shape=(55, 47, 3))

View File

@ -13,8 +13,14 @@ logger = Logger(module="basemodels.DlibResNet")
class DlibResNet:
def __init__(self):
# this is not a must dependency
import dlib # 19.20.0
## this is not a must dependency. do not import it in the global level.
try:
import dlib
except ModuleNotFoundError as e:
raise ImportError(
"Dlib is an optional dependency, ensure the library is installed."
"Please install using 'pip install dlib' "
) from e
self.layers = [DlibMetaData()]
@ -49,7 +55,7 @@ class DlibResNet:
# return None # classes must return None
def predict(self, img_aligned):
def predict(self, img_aligned: np.ndarray) -> np.ndarray:
# functions.detectFace returns 4 dimensional images
if len(img_aligned.shape) == 4:

View File

@ -1,5 +1,6 @@
from typing import Any
from deepface.basemodels.DlibResNet import DlibResNet
def loadModel():
def loadModel() -> Any:
return DlibResNet()

View File

@ -47,7 +47,7 @@ def scaling(x, scale):
return x * scale
def InceptionResNetV2(dimension=128):
def InceptionResNetV2(dimension=128) -> Model:
inputs = Input(shape=(160, 160, 3))
x = Conv2D(32, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_1a_3x3")(inputs)
@ -1618,12 +1618,9 @@ def InceptionResNetV2(dimension=128):
return model
# url = 'https://drive.google.com/uc?id=1971Xk5RwedbudGgTIrGAL4F7Aifu7id1'
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet_weights.h5",
):
) -> Model:
model = InceptionResNetV2()
# -----------------------------------

View File

@ -1,14 +1,23 @@
import os
import gdown
import tensorflow as tf
from deepface.basemodels import Facenet
from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.Facenet512")
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model
else:
from tensorflow.keras.models import Model
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
):
) -> Model:
model = Facenet.InceptionResNetV2(dimension=512)

View File

@ -40,7 +40,7 @@ else:
def loadModel(
url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
):
) -> Model:
base_model = Sequential()
base_model.add(
Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))

View File

@ -27,7 +27,7 @@ else:
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
):
) -> Model:
myInput = Input(shape=(96, 96, 3))
x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)

View File

@ -1,4 +1,6 @@
import os
from typing import Any
import numpy as np
import cv2 as cv
import gdown
@ -25,7 +27,7 @@ class SFaceModel:
self.layers = [_Layer()]
def predict(self, image):
def predict(self, image: np.ndarray) -> np.ndarray:
# Preprocess
input_blob = (image[0] * 255).astype(
np.uint8
@ -39,7 +41,7 @@ class SFaceModel:
def load_model(
url="https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
):
) -> Any:
home = functions.get_deepface_home()

View File

@ -34,7 +34,7 @@ else:
# ---------------------------------------
def baseModel():
def baseModel() -> Sequential:
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation="relu"))
@ -83,17 +83,12 @@ def baseModel():
return model
# url = 'https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo'
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
):
) -> Model:
model = baseModel()
# -----------------------------------
home = functions.get_deepface_home()
output = home + "/.deepface/weights/vgg_face_weights.h5"
@ -101,13 +96,8 @@ def loadModel(
logger.info("vgg_face_weights.h5 will be downloaded...")
gdown.download(url, output, quiet=False)
# -----------------------------------
model.load_weights(output)
# -----------------------------------
# TO-DO: why?
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
return vgg_face_descriptor

View File

@ -1,14 +1,25 @@
from typing import Union
import numpy as np
def findCosineDistance(source_representation, test_representation):
def findCosineDistance(
source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
) -> np.float64:
if isinstance(source_representation, list):
source_representation = np.array(source_representation)
if isinstance(test_representation, list):
test_representation = np.array(test_representation)
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
def findEuclideanDistance(
source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
) -> np.float64:
if isinstance(source_representation, list):
source_representation = np.array(source_representation)
@ -21,11 +32,11 @@ def findEuclideanDistance(source_representation, test_representation):
return euclidean_distance
def l2_normalize(x):
def l2_normalize(x: np.ndarray) -> np.ndarray:
return x / np.sqrt(np.sum(np.multiply(x, x)))
def findThreshold(model_name, distance_metric):
def findThreshold(model_name: str, distance_metric: str) -> float:
base_threshold = {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75}

View File

@ -1,10 +1,11 @@
import os
from typing import Union, Tuple
import base64
from pathlib import Path
from PIL import Image
import requests
# 3rd party dependencies
from PIL import Image
import requests
import numpy as np
import cv2
import tensorflow as tf
@ -33,7 +34,7 @@ elif tf_major_version == 2:
# --------------------------------------------------
def initialize_folder():
def initialize_folder() -> None:
"""Initialize the folder for storing weights and models.
Raises:
@ -52,7 +53,7 @@ def initialize_folder():
logger.info(f"Directory {home}/.deepface/weights created")
def get_deepface_home():
def get_deepface_home() -> str:
"""Get the home directory for storing weights and models.
Returns:
@ -64,7 +65,7 @@ def get_deepface_home():
# --------------------------------------------------
def loadBase64Img(uri):
def loadBase64Img(uri: str) -> np.ndarray:
"""Load image from base64 string.
Args:
@ -80,7 +81,7 @@ def loadBase64Img(uri):
return img_bgr
def load_image(img):
def load_image(img: Union[str, np.ndarray]) -> Tuple[np.ndarray, str]:
"""
Load image from path, url, base64 or numpy array.
Args:
@ -91,15 +92,18 @@ def load_image(img):
"""
# The image is already a numpy array
if type(img).__module__ == np.__name__:
return img, None
if isinstance(img, np.ndarray):
return img, "numpy array"
if isinstance(img, Path):
img = str(img)
if not isinstance(img, str):
raise ValueError(f"img must be numpy array or str but it is {type(img)}")
# The image is a base64 string
if img.startswith("data:image/"):
return loadBase64Img(img), None
return loadBase64Img(img), "base64 encoded string"
# The image is a url
if img.startswith("http"):
@ -128,13 +132,13 @@ def load_image(img):
def extract_faces(
img,
target_size=(224, 224),
detector_backend="opencv",
grayscale=False,
enforce_detection=True,
align=True,
):
img: Union[str, np.ndarray],
target_size: tuple = (224, 224),
detector_backend: str = "opencv",
grayscale: bool = False,
enforce_detection: bool = True,
align: bool = True,
) -> list:
"""Extract faces from an image.
Args:
@ -252,7 +256,7 @@ def extract_faces(
return extracted_faces
def normalize_input(img, normalization="base"):
def normalize_input(img: np.ndarray, normalization: str = "base") -> np.ndarray:
"""Normalize input image.
Args:
@ -310,7 +314,7 @@ def normalize_input(img, normalization="base"):
return img
def find_target_size(model_name):
def find_target_size(model_name: str) -> tuple:
"""Find the target size of the model.
Args:
@ -346,17 +350,18 @@ def find_target_size(model_name):
@deprecated(version="0.0.78", reason="Use extract_faces instead of preprocess_face")
def preprocess_face(
img,
img: Union[str, np.ndarray],
target_size=(224, 224),
detector_backend="opencv",
grayscale=False,
enforce_detection=True,
align=True,
):
"""Preprocess face.
) -> Union[np.ndarray, None]:
"""
Preprocess only one face
Args:
img (numpy array): the input image.
img (str or numpy): the input image.
target_size (tuple, optional): the target size. Defaults to (224, 224).
detector_backend (str, optional): the detector backend. Defaults to "opencv".
grayscale (bool, optional): whether to convert to grayscale. Defaults to False.
@ -364,7 +369,7 @@ def preprocess_face(
align (bool, optional): whether to align the face. Defaults to True.
Returns:
numpy array: the preprocessed face.
loaded image (numpy array): the preprocessed face.
Raises:
ValueError: if face is not detected and enforce_detection is True.

View File

@ -12,7 +12,8 @@ class Logger:
except Exception as err:
self.dump_log(
f"Exception while parsing $DEEPFACE_LOG_LEVEL."
f"Expected int but it is {log_level} ({str(err)})"
f"Expected int but it is {log_level} ({str(err)})."
"Setting app log level to info."
)
self.log_level = logging.INFO

View File

@ -1,14 +1,19 @@
import os
import bz2
import gdown
import numpy as np
from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="detectors.DlibWrapper")
def build_model():
def build_model() -> dict:
"""
Build a dlib hog face detector model
Returns:
model (Any)
"""
home = functions.get_deepface_home()
# this is not a must dependency. do not import it in the global level.
@ -46,8 +51,16 @@ def build_model():
return detector
def detect_face(detector, img, align=True):
def detect_face(detector: dict, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with dlib
Args:
detector (dict): dlib face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
# this is not a must dependency. do not import it in the global level.
try:
import dlib

View File

@ -1,3 +1,4 @@
from typing import Any, Union
from PIL import Image
import numpy as np
from deepface.detectors import (
@ -13,7 +14,14 @@ from deepface.detectors import (
)
def build_model(detector_backend):
def build_model(detector_backend: str) -> Any:
"""
Build a face detector model
Args:
detector_backend (str): backend detector name
Returns:
built detector (Any)
"""
global face_detector_obj # singleton design pattern
backends = {
@ -44,7 +52,20 @@ def build_model(detector_backend):
return face_detector_obj[detector_backend]
def detect_face(face_detector, detector_backend, img, align=True):
def detect_face(
face_detector: Any, detector_backend: str, img: np.ndarray, align: bool = True
) -> tuple:
"""
Detect a single face from a given image
Args:
face_detector (Any): pre-built face detector object
detector_backend (str): detector name
img (np.ndarray): pre-loaded image
align (bool): enable or disable alignment after detection
Returns:
result (tuple): tuple of face (np.ndarray), face region (list)
, confidence score (float)
"""
obj = detect_faces(face_detector, detector_backend, img, align)
if len(obj) > 0:
@ -60,7 +81,20 @@ def detect_face(face_detector, detector_backend, img, align=True):
return face, region, confidence
def detect_faces(face_detector, detector_backend, img, align=True):
def detect_faces(
face_detector: Any, detector_backend: str, img: np.ndarray, align: bool = True
) -> list:
"""
Detect face(s) from a given image
Args:
face_detector (Any): pre-built face detector object
detector_backend (str): detector name
img (np.ndarray): pre-loaded image
align (bool): enable or disable alignment after detection
Returns:
result (list): list of tuples of face (np.ndarray), face region (list)
and confidence score (float)
"""
backends = {
"opencv": OpenCvWrapper.detect_face,
"ssd": SsdWrapper.detect_face,
@ -83,18 +117,32 @@ def detect_faces(face_detector, detector_backend, img, align=True):
raise ValueError("invalid detector_backend passed - " + detector_backend)
def get_alignment_angle_arctan2(left_eye, right_eye):
def get_alignment_angle_arctan2(
left_eye: Union[list, tuple], right_eye: Union[list, tuple]
) -> float:
"""
The left_eye is the eye to the left of the viewer,
i.e., right eye of the person in the image.
The top-left point of the frame is (0, 0).
Find the angle between eyes
Args:
left_eye: coordinates of left eye with respect to the viewer
right_eye: coordinates of right eye with respect to the viewer
Returns:
angle (float)
"""
return float(np.degrees(
np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])
))
return float(np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])))
def alignment_procedure(img, left_eye, right_eye):
def alignment_procedure(
img: np.ndarray, left_eye: Union[list, tuple], right_eye: Union[list, tuple]
) -> np.ndarray:
"""
Rotate given image until eyes are on a horizontal line
Args:
img (np.ndarray): pre-loaded image
left_eye: coordinates of left eye with respect to the viewer
right_eye: coordinates of right eye with respect to the viewer
Returns:
result (np.ndarray): aligned face
"""
angle = get_alignment_angle_arctan2(left_eye, right_eye)
img = Image.fromarray(img)
img = np.array(img.rotate(angle))

View File

@ -1,11 +1,18 @@
from typing import Any, Union
import cv2
import numpy as np
from deepface.detectors import FaceDetector
# Link -> https://github.com/timesler/facenet-pytorch
# Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch
def build_model():
def build_model() -> Any:
"""
Build a fast mtcnn face detector model
Returns:
model (Any)
"""
# this is not a must dependency. do not import it in the global level.
try:
from facenet_pytorch import MTCNN as fast_mtcnn
@ -25,7 +32,7 @@ def build_model():
return face_detector
def xyxy_to_xywh(xyxy):
def xyxy_to_xywh(xyxy: Union[list, tuple]) -> list:
"""
Convert xyxy format to xywh format.
"""
@ -35,8 +42,16 @@ def xyxy_to_xywh(xyxy):
return [x, y, w, h]
def detect_face(face_detector, img, align=True):
def detect_face(face_detector: Any, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with mtcnn
Args:
face_detector (Any): mtcnn face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []
detected_face = None

View File

@ -1,9 +1,16 @@
from typing import Any
import numpy as np
from deepface.detectors import FaceDetector
# Link - https://google.github.io/mediapipe/solutions/face_detection
def build_model():
def build_model() -> Any:
"""
Build a mediapipe face detector model
Returns:
model (Any)
"""
# this is not a must dependency. do not import it in the global level.
try:
import mediapipe as mp
@ -18,7 +25,16 @@ def build_model():
return face_detection
def detect_face(face_detector, img, align=True):
def detect_face(face_detector: Any, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with mediapipe
Args:
face_detector (Any): mediapipe face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []
img_width = img.shape[1]

View File

@ -1,15 +1,31 @@
from typing import Any
import cv2
import numpy as np
from deepface.detectors import FaceDetector
def build_model():
def build_model() -> Any:
"""
Build a mtcnn face detector model
Returns:
model (Any)
"""
from mtcnn import MTCNN
face_detector = MTCNN()
return face_detector
def detect_face(face_detector, img, align=True):
def detect_face(face_detector: Any, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with mtcnn
Args:
face_detector (mtcnn.MTCNN): mtcnn face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []

View File

@ -1,16 +1,28 @@
import os
from typing import Any
import cv2
import numpy as np
from deepface.detectors import FaceDetector
def build_model():
def build_model() -> dict:
"""
Build opencv face and eye detector models
Returns:
model (Any)
"""
detector = {}
detector["face_detector"] = build_cascade("haarcascade")
detector["eye_detector"] = build_cascade("haarcascade_eye")
return detector
def build_cascade(model_name="haarcascade"):
def build_cascade(model_name="haarcascade") -> Any:
"""
Build a single opencv cascade model (face or eye detector)
Returns:
model (Any)
"""
opencv_path = get_opencv_path()
if model_name == "haarcascade":
face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
@ -38,7 +50,16 @@ def build_cascade(model_name="haarcascade"):
return detector
def detect_face(detector, img, align=True):
def detect_face(detector: dict, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with opencv
Args:
detector (dict): opencv face and eye detector objects
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []
detected_face = None

View File

@ -1,21 +1,31 @@
def build_model():
from retinaface import RetinaFace # this is not a must dependency
from typing import Any
import numpy as np
from retinaface import RetinaFace
from retinaface.commons import postprocess
def build_model() -> Any:
"""
Build a retinaface detector model
Returns:
model (Any)
"""
face_detector = RetinaFace.build_model()
return face_detector
def detect_face(face_detector, img, align=True):
from retinaface import RetinaFace # this is not a must dependency
from retinaface.commons import postprocess
# ---------------------------------
def detect_face(face_detector: Any, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with retinaface
Args:
face_detector (Any): retinaface face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []
# --------------------------
obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)
if isinstance(obj, dict):

View File

@ -2,6 +2,7 @@ import os
import gdown
import cv2
import pandas as pd
import numpy as np
from deepface.detectors import OpenCvWrapper
from deepface.commons import functions
from deepface.commons.logger import Logger
@ -11,7 +12,12 @@ logger = Logger(module="detectors.SsdWrapper")
# pylint: disable=line-too-long
def build_model():
def build_model() -> dict:
"""
Build a ssd detector model
Returns:
model (Any)
"""
home = functions.get_deepface_home()
@ -51,8 +57,16 @@ def build_model():
return detector
def detect_face(detector, img, align=True):
def detect_face(detector: dict, img: np.ndarray, align: bool = True) -> list:
"""
Detect and align face with ssd
Args:
detector (dict): ssd face detector object
img (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
resp = []
detected_face = None

View File

@ -1,3 +1,5 @@
from typing import Any
import numpy as np
from deepface.detectors import FaceDetector
from deepface.commons.logger import Logger
@ -14,8 +16,12 @@ WEIGHT_URL = "https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb"
LANDMARKS_CONFIDENCE_THRESHOLD = 0.5
def build_model():
"""Build YOLO (yolov8n-face) model"""
def build_model() -> Any:
"""
Build a yolo detector model
Returns:
model (Any)
"""
import gdown
import os
@ -41,7 +47,16 @@ def build_model():
return YOLO(weight_path)
def detect_face(face_detector, img, align=False):
def detect_face(face_detector: Any, img: np.ndarray, align: bool = False) -> list:
"""
Detect and align face with yolo
Args:
face_detector (Any): yolo face detector object
img (np.ndarray): pre-loaded image
align (bool): default is false
Returns:
list of detected and aligned faces
"""
resp = []
# Detect faces

View File

@ -1,5 +1,7 @@
import os
from typing import Any
import cv2
import numpy as np
import gdown
from deepface.detectors import FaceDetector
from deepface.commons import functions
@ -7,7 +9,13 @@ from deepface.commons.logger import Logger
logger = Logger(module="detectors.YunetWrapper")
def build_model():
def build_model() -> Any:
"""
Build a yunet detector model
Returns:
model (Any)
"""
# pylint: disable=C0301
url = "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx"
file_name = "face_detection_yunet_2023mar.onnx"
@ -20,7 +28,18 @@ def build_model():
return face_detector
def detect_face(detector, image, align=True, score_threshold=0.9):
def detect_face(
detector: Any, image: np.ndarray, align: bool = True, score_threshold: float = 0.9
) -> list:
"""
Detect and align face with yunet
Args:
detector (Any): yunet face detector object
image (np.ndarray): pre-loaded image
align (bool): default is true
Returns:
list of detected and aligned faces
"""
# FaceDetector.detect_faces does not support score_threshold parameter.
# We can set it via environment variable.
score_threshold = os.environ.get("yunet_score_threshold", score_threshold)
@ -78,12 +97,8 @@ def detect_face(detector, image, align=True, score_threshold=0.9):
detected_face = image[int(y) : int(y + h), int(x) : int(x + w)]
img_region = [x, y, w, h]
if align:
detected_face = yunet_align_face(detected_face, x_re, y_re, x_le, y_le)
detected_face = FaceDetector.alignment_procedure(
detected_face, (x_re, y_re), (x_le, y_le)
)
resp.append((detected_face, img_region, confidence))
return resp
# x_re, y_re, x_le, y_le stand for the coordinates of right eye, left eye
def yunet_align_face(img, x_re, y_re, x_le, y_le):
img = FaceDetector.alignment_procedure(img, (x_re, y_re), (x_le, y_le))
return img

View File

@ -16,7 +16,7 @@ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
else:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
@ -25,7 +25,7 @@ elif tf_version == 2:
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
):
) -> Model:
model = VGGFace.baseModel()
@ -60,7 +60,7 @@ def loadModel(
# --------------------------
def findApparentAge(age_predictions):
def findApparentAge(age_predictions) -> np.float64:
output_indexes = np.array(list(range(0, 101)))
apparent_age = np.sum(age_predictions * output_indexes)
return apparent_age

View File

@ -15,7 +15,7 @@ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
elif tf_version == 2:
else:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Conv2D,
@ -33,7 +33,7 @@ labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
):
) -> Sequential:
num_classes = 7

View File

@ -17,7 +17,7 @@ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
else:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
# -------------------------------------
@ -28,7 +28,7 @@ labels = ["Woman", "Man"]
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
):
) -> Model:
model = VGGFace.baseModel()

View File

@ -16,7 +16,7 @@ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
else:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
# --------------------------
@ -26,7 +26,7 @@ labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispani
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
):
) -> Model:
model = VGGFace.baseModel()

View File

@ -9,7 +9,7 @@ def test_standard_represent():
embedding_objs = DeepFace.represent(img_path)
for embedding_obj in embedding_objs:
embedding = embedding_obj["embedding"]
logger.info(f"Function returned {len(embedding)} dimensional vector")
logger.debug(f"Function returned {len(embedding)} dimensional vector")
assert len(embedding) == 2622
logger.info("✅ test standard represent function done")