Remove unwanted changes, and add comments.

Vincent STRAGIER 2023-06-05 13:47:55 +02:00
parent 6c06996686
commit a380cfffbb
6 changed files with 42 additions and 27 deletions

View File

@@ -9,7 +9,5 @@
"python.formatting.provider": "black",
"python.formatting.blackArgs": ["--line-length=100"],
"editor.fontWeight": "normal",
"python.analysis.extraPaths": ["./deepface"],
"stylelint.autoFixOnSave": false,
"standard.autoFixOnSave": false
"python.analysis.extraPaths": ["./deepface"]
}

View File

@@ -194,7 +194,7 @@ Age model got ± 4.65 MAE; gender model got 97.44% accuracy, 96.29% precision an
**Face Detectors** - [`Demo`](https://youtu.be/GZ2p2hj2H5k)
Face detection and alignment are important early stages of a modern face recognition pipeline. Experiments show that just alignment increases the face recognition accuracy almost 1%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`SSD`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MTCNN`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/) and [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/) detectors are wrapped in deepface.
Face detection and alignment are important early stages of a modern face recognition pipeline. Experiments show that just alignment increases the face recognition accuracy almost 1%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`SSD`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MTCNN`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/), [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/) and [`YOLOv8 Face`](https://github.com/derronqi/yolov8-face) detectors are wrapped in deepface.
<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/detector-portfolio-v3.jpg" width="95%" height="95%"></p>
@@ -207,7 +207,8 @@ backends = [
'dlib',
'mtcnn',
'retinaface',
'mediapipe'
'mediapipe',
'yolov8n',
]
#face verification
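A short usage sketch (not part of this commit): picking the newly added backend from the backends list above for face verification. The image paths are placeholders.

from deepface import DeepFace

result = DeepFace.verify(
    img1_path="img1.jpg",
    img2_path="img2.jpg",
    detector_backend=backends[-1],  # 'yolov8n' from the list above
)
print(result)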

View File

@@ -41,6 +41,7 @@ if tf_version == 2:
def build_model(model_name):
"""
This function builds a deepface model
Parameters:
@@ -95,6 +96,7 @@ def verify(
align=True,
normalization="base",
):
"""
This function verifies an image pair is same person or different persons. In the background,
verification function represents facial images as vectors and then calculates the similarity
@@ -116,7 +118,7 @@ def verify(
This might be convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
dlib, mediapipe or yolov8n.
align (boolean): alignment according to the eye positions.
@@ -233,8 +235,9 @@ def analyze(
align=True,
silent=False,
):
"""
This function analyze facial attributes including age, gender, emotion and race.
This function analyzes facial attributes including age, gender, emotion and race.
In the background, analysis function builds convolutional neural network models to
classify age, gender, emotion and race of the input image.
@@ -251,7 +254,7 @@ def analyze(
resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe.
dlib, mediapipe or yolov8n.
align (boolean): alignment according to the eye positions.
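A minimal sketch (not part of this commit) of the analyze call documented above, using the newly supported yolov8n backend; the image path is a placeholder.

from deepface import DeepFace

objs = DeepFace.analyze(
    img_path="img.jpg",
    actions=("emotion", "age", "gender", "race"),
    detector_backend="yolov8n",
)
print(objs)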
@@ -297,16 +300,6 @@ def analyze(
actions = (actions,)
actions = list(actions)
if not actions:
raise ValueError("`actions` must be a list of strings.")
for action in actions:
if action not in ("emotion", "age", "gender", "race"):
raise ValueError(
f"Invalid action passed ({action})). "
"Valid actions are `emotion`, `age`, `gender`, `race`."
)
# ---------------------------------
# build models
models = {}
@@ -405,6 +398,7 @@ def find(
normalization="base",
silent=False,
):
"""
This function applies verification several times and find the identities in a database
@@ -427,10 +421,10 @@ def find(
resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
dlib, mediapipe or yolov8n.
align (boolean): alignment according to the eye positions.
normalization (string): normalize the input image before feeding to model
silent (boolean): disable some logging and progress bars
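A minimal sketch (not part of this commit) of the find call documented above; db_path points to a folder of facial images and both paths are placeholders.

from deepface import DeepFace

dfs = DeepFace.find(
    img_path="target.jpg",
    db_path="my_db",
    detector_backend="yolov8n",
    silent=True,
)
print(dfs)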
@@ -454,6 +448,7 @@ def find(
file_name = file_name.replace("-", "_").lower()
if path.exists(db_path + "/" + file_name):
if not silent:
print(
f"WARNING: Representations for images in {db_path} folder were previously stored"
@@ -621,6 +616,7 @@ def represent(
align=True,
normalization="base",
):
"""
This function represents facial images as vectors. The function uses convolutional neural
networks models to generate vector embeddings.
@@ -638,7 +634,7 @@ def represent(
This might be convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
dlib, mediapipe or yolov8n.
align (boolean): alignment according to the eye positions.
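A minimal sketch (not part of this commit) of the represent call documented above; the image path is a placeholder.

from deepface import DeepFace

embedding_objs = DeepFace.represent(
    img_path="img.jpg",
    detector_backend="yolov8n",
    normalization="base",
)
print(embedding_objs)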
@@ -714,6 +710,7 @@ def stream(
time_threshold=5,
frame_threshold=5,
):
"""
This function applies real time face recognition and facial attribute analysis
@@ -723,7 +720,7 @@ def stream(
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
ArcFace, SFace
detector_backend (string): opencv, retinaface, mtcnn, ssd, dlib or mediapipe
detector_backend (string): opencv, retinaface, mtcnn, ssd, dlib, mediapipe or yolov8n.
distance_metric (string): cosine, euclidean, euclidean_l2
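A minimal sketch (not part of this commit) of the stream call documented above: real-time recognition from the default webcam, with db_path standing in for a folder of facial images.

from deepface import DeepFace

DeepFace.stream(
    db_path="my_db",
    detector_backend="yolov8n",
    time_threshold=5,
    frame_threshold=5,
)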
@@ -768,6 +765,7 @@ def extract_faces(
align=True,
grayscale=False,
):
"""
This function applies pre-processing stages of a face recognition pipeline
including detection and alignment
@@ -832,7 +830,7 @@ def detectFace(
):
"""
Deprecated function. Use extract_faces for same functionality.
This function applies pre-processing stages of a face recognition pipeline
including detection and alignment
@@ -857,7 +855,7 @@ def detectFace(
Returns:
detected and aligned face as numpy array
"""
print("⚠️ Function detectFace is deprecated. Use extract_faces instead.")
face_objs = extract_faces(
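A minimal sketch (not part of this commit) of the replacement suggested by the deprecation message above; the image path is a placeholder.

from deepface import DeepFace

face_objs = DeepFace.extract_faces(
    img_path="img.jpg",
    detector_backend="yolov8n",
    align=True,
)
print(len(face_objs))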

View File

@@ -47,6 +47,9 @@ def detect_face(face_detector, detector_backend, img, align=True):
if len(obj) > 0:
face, region, confidence = obj[0] # discard multiple faces
# If no face detected, return, set face to None,
# image region to full image, confidence to 0
else: # len(obj) == 0
face = None
region = [0, 0, img.shape[1], img.shape[0]]

View File

@@ -44,6 +44,7 @@ def detect_face(detector, img, align=True):
detected_face = None
img_region = [0, 0, img.shape[1], img.shape[0]]
# Initialize faces and scores to empty lists
faces = []
scores = []
try:
@@ -53,12 +54,16 @@ def detect_face(detector, img, align=True):
faces, _, scores = detector["face_detector"].detectMultiScale3(
img, 1.1, 10, outputRejectLevels=True
)
# except alone is too broad and will catch keyboard interrupts
# Exception should be changed to something more specific in the future
except Exception: # pylint: disable=broad-except
# except alone is too broad and will catch keyboard interrupts
import traceback
print(traceback.format_exc())
# For each face and associated score, append face,
# bounding box, and score to resp
for (x, y, w, h), confidence in zip(faces, scores):
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
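One possible narrowing of the handler flagged in the comments above (a sketch, not part of the commit), assuming the detector failure surfaces as cv2.error:

import traceback
import cv2

try:
    faces, _, scores = detector["face_detector"].detectMultiScale3(
        img, 1.1, 10, outputRejectLevels=True
    )
except cv2.error:  # narrower than Exception; unrelated errors still propagate
    print(traceback.format_exc())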

View File

@@ -1,13 +1,16 @@
from deepface.detectors import FaceDetector
# Models names and paths
PATHS = {
"yolov8-lite-t": "/.deepface/weights/yolov8-lite-t.pt",
"yolov8-lite-s": "/.deepface/weights/yolov8-lite-s.pt",
"yolov8n": "/.deepface/weights/yolov8n-face.pt",
}
# Google Drive base URL
BASE_URL = "https://drive.google.com/uc?id="
# Models' Google Drive IDs
IDS = {
"yolov8-lite-t": "1vFMGW8xtRVo9bfC9yJVWWGY7vVxbLh94",
"yolov8-lite-s": "1ckpBT8KfwURTvTm5pa-cMC89A0V5jbaq",
@@ -19,20 +22,24 @@ def build_model(model: str):
"""Function factory for YOLO models"""
from deepface.commons.functions import get_deepface_home
# Get model's weights path and Google Drive URL
func_weights_path = f"{get_deepface_home()}{PATHS[model]}"
func_url = f"{BASE_URL}{IDS[model]}"
# Define function to build the model
def _build_model(weights_path: str = func_weights_path, url: str = func_url):
import gdown
import os
# Import the Ultralytics YOLO model
from ultralytics import YOLO
# Download the model's weights if they don't exist
if not os.path.isfile(weights_path):
gdown.download(url, weights_path, quiet=False)
print(f"Downloaded YOLO model {os.path.basename(PATHS[model])}")
# return face_detector
# Return face_detector
return YOLO(weights_path)
return _build_model
@@ -41,9 +48,12 @@ def build_model(model: str):
def detect_face(face_detector, img, align=False):
resp = []
# Detect faces
results = face_detector.predict(img, verbose=False, show=False, conf=0.25)[0]
# For each face, extract the bounding box, the landmarks and confidence
for result in results:
# Extract the bounding box and the confidence
x, y, w, h = result.boxes.xywh.tolist()[0]
confidence = result.boxes.conf.tolist()[0]
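A minimal usage sketch (not part of this commit): building the yolov8n detector through the factory above and running detection. The module path deepface.detectors.YoloWrapper and the (face, region, confidence) tuple layout are assumptions based on this diff and the FaceDetector convention shown earlier.

import cv2
from deepface.detectors import YoloWrapper  # assumed module name

build_yolov8n = YoloWrapper.build_model("yolov8n")  # factory returns a builder
face_detector = build_yolov8n()  # downloads the weights on first use
img = cv2.imread("img.jpg")  # placeholder path

for face, region, confidence in YoloWrapper.detect_face(face_detector, img, align=False):
    print(region, confidence)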