Mirror of https://github.com/serengil/deepface.git (synced 2025-06-07 12:05:22 +00:00)
Merge pull request #911 from serengil/feat-task-0712-some-improvements
Feat task 0712 some improvements
This commit is contained in: commit 5696d27e84

.github/workflows/tests.yml (vendored)
@@ -1,4 +1,4 @@
-name: Tests
+name: Tests and Linting
 
 on:
   push:
deepface/DeepFace.py

@@ -30,6 +30,8 @@ from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.commons import functions, realtime, distance as dst
 from deepface.commons.logger import Logger
 
+# pylint: disable=no-else-raise
+
 logger = Logger(module="DeepFace")
 
 # -----------------------------------
@@ -465,8 +467,16 @@ def find(
     file_name = f"representations_{model_name}.pkl"
     file_name = file_name.replace("-", "_").lower()
 
+    df_cols = [
+        "identity",
+        f"{model_name}_representation",
+        "target_x",
+        "target_y",
+        "target_w",
+        "target_h",
+    ]
+
     if path.exists(db_path + "/" + file_name):
         if not silent:
             logger.warn(
                 f"Representations for images in {db_path} folder were previously stored"

@@ -477,6 +487,12 @@ def find(
         with open(f"{db_path}/{file_name}", "rb") as f:
             representations = pickle.load(f)
 
+        if len(representations) > 0 and len(representations[0]) != len(df_cols):
+            raise ValueError(
+                f"Seems existing {db_path}/{file_name} is out-of-the-date."
+                "Delete it and re-run."
+            )
+
         if not silent:
             logger.info(f"There are {len(representations)} representations found in {file_name}")
 
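With this change every row of the cached representations_<model>.pkl stores six fields (identity, embedding, and the detected facial area), and find() rejects a cached file whose rows do not match df_cols. A minimal sketch of the new row shape and the width check; the pickle path and the values below are illustrative, not taken from the repository:

    import pickle

    model_name = "VGG-Face"
    df_cols = [
        "identity",
        f"{model_name}_representation",
        "target_x",
        "target_y",
        "target_w",
        "target_h",
    ]

    # one hypothetical row: identity, (truncated) embedding, then that face's bounding box
    row = ["db/alice/1.jpg", [0.11, -0.52, 0.07], 112, 84, 160, 160]

    with open("representations_vgg_face.pkl", "wb") as f:
        pickle.dump([row], f)

    with open("representations_vgg_face.pkl", "rb") as f:
        representations = pickle.load(f)

    # mirrors the new guard in find(): a two-column pickle from an older release fails here
    if len(representations) > 0 and len(representations[0]) != len(df_cols):
        raise ValueError("Existing representations file is out of date. Delete it and re-run.")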
@@ -523,7 +539,7 @@ def find(
                 align=align,
             )
 
-            for img_content, _, _ in img_objs:
+            for img_content, img_region, _ in img_objs:
                 embedding_obj = represent(
                     img_path=img_content,
                     model_name=model_name,

@@ -538,6 +554,10 @@ def find(
                 instance = []
                 instance.append(employee)
                 instance.append(img_representation)
+                instance.append(img_region["x"])
+                instance.append(img_region["y"])
+                instance.append(img_region["w"])
+                instance.append(img_region["h"])
                 representations.append(instance)
 
         # -------------------------------

@@ -553,10 +573,13 @@ def find(
 
     # ----------------------------
     # now, we got representations for facial database
-    df = pd.DataFrame(representations, columns=["identity", f"{model_name}_representation"])
+    df = pd.DataFrame(
+        representations,
+        columns=df_cols,
+    )
 
     # img path might have more than once face
-    target_objs = functions.extract_faces(
+    source_objs = functions.extract_faces(
         img=img_path,
         target_size=target_size,
         detector_backend=detector_backend,

@@ -567,9 +590,9 @@ def find(
 
     resp_obj = []
 
-    for target_img, target_region, _ in target_objs:
+    for source_img, source_region, _ in source_objs:
         target_embedding_obj = represent(
-            img_path=target_img,
+            img_path=source_img,
             model_name=model_name,
             enforce_detection=enforce_detection,
             detector_backend="skip",

@@ -580,10 +603,10 @@ def find(
         target_representation = target_embedding_obj[0]["embedding"]
 
         result_df = df.copy()  # df will be filtered in each img
-        result_df["source_x"] = target_region["x"]
-        result_df["source_y"] = target_region["y"]
-        result_df["source_w"] = target_region["w"]
-        result_df["source_h"] = target_region["h"]
+        result_df["source_x"] = source_region["x"]
+        result_df["source_y"] = source_region["y"]
+        result_df["source_w"] = source_region["w"]
+        result_df["source_h"] = source_region["h"]
 
         distances = []
         for index, instance in df.iterrows():
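Downstream, the DataFrames returned by find() now carry both the source_* columns set above (where the query face sits in img_path) and the target_* columns coming from df_cols (where the matched face sits in each database image). A usage sketch, assuming the public DeepFace.find signature and hypothetical paths:

    from deepface import DeepFace

    # find() returns one DataFrame per face detected in img_path
    dfs = DeepFace.find(img_path="img1.jpg", db_path="my_db", model_name="VGG-Face")

    for df in dfs:
        # source_x/y/w/h describe the query face location in img1.jpg;
        # target_x/y/w/h (from df_cols) describe the face location in each database image
        print(df[["identity", "source_x", "source_y", "source_w", "source_h"]].head())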
@@ -815,6 +838,7 @@ def extract_faces(
     """
+
     resp_objs = []
 
     img_objs = functions.extract_faces(
         img=img_path,
         target_size=target_size,
deepface/commons/functions.py

@@ -16,6 +16,8 @@ from deepface.commons.logger import Logger
 
 logger = Logger(module="commons.functions")
 
+# pylint: disable=no-else-raise
+
 # --------------------------------------------------
 # configurations of dependencies
 
@@ -73,49 +75,52 @@ def loadBase64Img(uri):
     """
     encoded_data = uri.split(",")[1]
     nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
-    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-    return img
+    img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    # img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+    return img_bgr
 
 
 def load_image(img):
-    """Load image from path, url, base64 or numpy array.
+    """
+    Load image from path, url, base64 or numpy array.
     Args:
         img: a path, url, base64 or numpy array.
-
-    Raises:
-        ValueError: if the image path does not exist.
 
     Returns:
-        numpy array: the loaded image.
+        image (numpy array): the loaded image in BGR format
+        image name (str): image name itself
     """
 
     # The image is already a numpy array
     if type(img).__module__ == np.__name__:
-        return img
+        return img, None
 
     # The image is a base64 string
     if img.startswith("data:image/"):
-        return loadBase64Img(img)
+        return loadBase64Img(img), None
 
     # The image is a url
     if img.startswith("http"):
-        return np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))[
-            :, :, ::-1
-        ]
+        return (
+            np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("BGR"))[
+                :, :, ::-1
+            ],
+            # return url as image name
+            img,
+        )
 
     # The image is a path
     if os.path.isfile(img) is not True:
         raise ValueError(f"Confirm that {img} exists")
 
-    # For reading images with unicode names
-    with open(img, "rb") as img_f:
-        chunk = img_f.read()
-        chunk_arr = np.frombuffer(chunk, dtype=np.uint8)
-        img = cv2.imdecode(chunk_arr, cv2.IMREAD_COLOR)
-        return img
-
-    # This causes troubles when reading files with non english names
-    # return cv2.imread(img)
+    # image must be a file on the system then
+
+    # image name must have english characters
+    if img.isascii() is False:
+        raise ValueError(f"Input image must not have non-english characters - {img}")
+
+    img_obj_bgr = cv2.imread(img)
+    # img_obj_rgb = cv2.cvtColor(img_obj_bgr, cv2.COLOR_BGR2RGB)
+    return img_obj_bgr, img
 
 
 # --------------------------------------------------
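Callers now have to unpack a pair from load_image; the image name is only meaningful for path and url inputs. A short sketch of the new contract (the file path below is hypothetical):

    import numpy as np
    from deepface.commons import functions

    # a file path comes back as the BGR array plus the path itself as the image name
    img, img_name = functions.load_image("dataset/img1.jpg")

    # numpy (and base64) inputs come back with None as the name, so guard before using it
    img2, img_name2 = functions.load_image(np.zeros((224, 224, 3), dtype=np.uint8))
    assert img_name2 is None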
@@ -152,7 +157,7 @@ def extract_faces(
     extracted_faces = []
 
     # img might be path, base64 or numpy array. Convert it to numpy whatever it is.
-    img = load_image(img)
+    img, img_name = load_image(img)
     img_region = [0, 0, img.shape[1], img.shape[0]]
 
     if detector_backend == "skip":
@@ -163,10 +168,17 @@ def extract_faces(
 
     # in case of no face found
     if len(face_objs) == 0 and enforce_detection is True:
-        raise ValueError(
-            "Face could not be detected. Please confirm that the picture is a face photo "
-            + "or consider to set enforce_detection param to False."
-        )
+        if img_name is not None:
+            raise ValueError(
+                f"Face could not be detected in {img_name}."
+                "Please confirm that the picture is a face photo "
+                "or consider to set enforce_detection param to False."
+            )
+        else:
+            raise ValueError(
+                "Face could not be detected. Please confirm that the picture is a face photo "
+                "or consider to set enforce_detection param to False."
+            )
 
     if len(face_objs) == 0 and enforce_detection is False:
         face_objs = [(img, img_region, 0)]
@@ -177,39 +189,38 @@ def extract_faces(
             current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
 
         # resize and padding
-        if current_img.shape[0] > 0 and current_img.shape[1] > 0:
-            factor_0 = target_size[0] / current_img.shape[0]
-            factor_1 = target_size[1] / current_img.shape[1]
-            factor = min(factor_0, factor_1)
-
-            dsize = (
-                int(current_img.shape[1] * factor),
-                int(current_img.shape[0] * factor),
-            )
-            current_img = cv2.resize(current_img, dsize)
-
-            diff_0 = target_size[0] - current_img.shape[0]
-            diff_1 = target_size[1] - current_img.shape[1]
-            if grayscale is False:
-                # Put the base image in the middle of the padded image
-                current_img = np.pad(
-                    current_img,
-                    (
-                        (diff_0 // 2, diff_0 - diff_0 // 2),
-                        (diff_1 // 2, diff_1 - diff_1 // 2),
-                        (0, 0),
-                    ),
-                    "constant",
-                )
-            else:
-                current_img = np.pad(
-                    current_img,
-                    (
-                        (diff_0 // 2, diff_0 - diff_0 // 2),
-                        (diff_1 // 2, diff_1 - diff_1 // 2),
-                    ),
-                    "constant",
-                )
+        factor_0 = target_size[0] / current_img.shape[0]
+        factor_1 = target_size[1] / current_img.shape[1]
+        factor = min(factor_0, factor_1)
+
+        dsize = (
+            int(current_img.shape[1] * factor),
+            int(current_img.shape[0] * factor),
+        )
+        current_img = cv2.resize(current_img, dsize)
+
+        diff_0 = target_size[0] - current_img.shape[0]
+        diff_1 = target_size[1] - current_img.shape[1]
+        if grayscale is False:
+            # Put the base image in the middle of the padded image
+            current_img = np.pad(
+                current_img,
+                (
+                    (diff_0 // 2, diff_0 - diff_0 // 2),
+                    (diff_1 // 2, diff_1 - diff_1 // 2),
+                    (0, 0),
+                ),
+                "constant",
+            )
+        else:
+            current_img = np.pad(
+                current_img,
+                (
+                    (diff_0 // 2, diff_0 - diff_0 // 2),
+                    (diff_1 // 2, diff_1 - diff_1 // 2),
+                ),
+                "constant",
+            )
 
         # double check: if target image is not still the same size with target.
         if current_img.shape[0:2] != target_size:
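To make the resize-and-padding arithmetic concrete, here is a small worked example outside the diff; the 100x50 crop and the 224x224 target are illustrative:

    import cv2
    import numpy as np

    target_size = (224, 224)
    current_img = np.zeros((100, 50, 3), dtype=np.uint8)  # hypothetical face crop, h=100, w=50

    factor_0 = target_size[0] / current_img.shape[0]  # 2.24
    factor_1 = target_size[1] / current_img.shape[1]  # 4.48
    factor = min(factor_0, factor_1)                  # keep the aspect ratio: 2.24

    dsize = (int(current_img.shape[1] * factor), int(current_img.shape[0] * factor))  # (112, 224)
    current_img = cv2.resize(current_img, dsize)      # shape is now (224, 112, 3)

    diff_0 = target_size[0] - current_img.shape[0]    # 0 rows to pad
    diff_1 = target_size[1] - current_img.shape[1]    # 112 columns to pad, split 56 / 56
    current_img = np.pad(
        current_img,
        ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)),
        "constant",
    )
    assert current_img.shape[0:2] == target_size      # (224, 224)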
deepface/commons/logger.py

@@ -1,5 +1,6 @@
 import os
 import logging
+from datetime import datetime
 
 # pylint: disable=broad-except
 class Logger:

@@ -17,7 +18,7 @@ class Logger:
 
     def info(self, message):
         if self.log_level <= logging.INFO:
-            self.dump_log(message)
+            self.dump_log(f"{message}")
 
     def debug(self, message):
         if self.log_level <= logging.DEBUG:

@@ -36,4 +37,4 @@ class Logger:
             self.dump_log(f"💥 {message}")
 
     def dump_log(self, message):
-        print(message)
+        print(f"{str(datetime.now())[2:-7]} - {message}")
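dump_log now prefixes every message with a trimmed timestamp: str(datetime.now()) yields something like "2023-07-12 21:05:09.123456", and the [2:-7] slice drops the century digits and the microseconds. A quick illustration with a fixed time and a made-up message:

    from datetime import datetime

    now = datetime(2023, 7, 12, 21, 5, 9, 123456)  # fixed value for the example
    print(f"{str(now)[2:-7]} - find function lasts 1.35 seconds")
    # prints: 23-07-12 21:05:09 - find function lasts 1.35 seconds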
deepface/detectors/DlibWrapper.py

@@ -6,11 +6,19 @@ from deepface.commons.logger import Logger
 
 logger = Logger(module="detectors.DlibWrapper")
 
 
 def build_model():
 
     home = functions.get_deepface_home()
 
-    import dlib  # this requirement is not a must that's why imported here
+    # this is not a must dependency. do not import it in the global level.
+    try:
+        import dlib
+    except ModuleNotFoundError as e:
+        raise ImportError(
+            "Dlib is an optional detector, ensure the library is installed."
+            "Please install using 'pip install dlib' "
+        ) from e
 
     # check required file exists in the home/.deepface/weights folder
     if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True:

@@ -40,7 +48,14 @@ def build_model():
 
 def detect_face(detector, img, align=True):
 
-    import dlib  # this requirement is not a must that's why imported here
+    # this is not a must dependency. do not import it in the global level.
+    try:
+        import dlib
+    except ModuleNotFoundError as e:
+        raise ImportError(
+            "Dlib is an optional detector, ensure the library is installed."
+            "Please install using 'pip install dlib' "
+        ) from e
 
     resp = []
 
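The same guarded-import pattern is applied below to the FastMtcnn, MediaPipe and Yolo wrappers. A generic sketch of the idea, with a placeholder module name standing in for the optional backend:

    def build_model():
        # defer the import so deepface stays importable when the optional backend is absent
        try:
            import some_optional_backend  # placeholder for dlib / facenet_pytorch / mediapipe / ultralytics
        except ModuleNotFoundError as e:
            # replace the bare ModuleNotFoundError with an actionable install hint
            raise ImportError(
                "This detector is optional, ensure the library is installed. "
                "Please install using 'pip install <package-name>'"
            ) from e
        return some_optional_backend

    try:
        detector_backend = build_model()
    except ImportError as err:
        print(err)  # reached whenever the optional package is not installed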
deepface/detectors/FastMtcnnWrapper.py

@@ -4,23 +4,27 @@ from deepface.detectors import FaceDetector
 # Link -> https://github.com/timesler/facenet-pytorch
 # Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch
 
 
 def build_model():
-    # Optional dependency
+    # this is not a must dependency. do not import it in the global level.
     try:
         from facenet_pytorch import MTCNN as fast_mtcnn
     except ModuleNotFoundError as e:
-        raise ImportError("This is an optional detector, ensure the library is installed. \
-            Please install using 'pip install facenet-pytorch' ") from e
+        raise ImportError(
+            "FastMtcnn is an optional detector, ensure the library is installed."
+            "Please install using 'pip install facenet-pytorch' "
+        ) from e
 
-    face_detector = fast_mtcnn(image_size=160,
+    face_detector = fast_mtcnn(
+        image_size=160,
         thresholds=[0.6, 0.7, 0.7],  # MTCNN thresholds
         post_process=True,
-        device='cpu',
+        device="cpu",
         select_largest=False,  # return result in descending order
     )
     return face_detector
 
 
 def xyxy_to_xywh(xyxy):
     """
     Convert xyxy format to xywh format.

@@ -30,6 +34,7 @@ def xyxy_to_xywh(xyxy):
     h = xyxy[3] - y + 1
     return [x, y, w, h]
 
+
 def detect_face(face_detector, img, align=True):
 
     resp = []

@@ -38,7 +43,9 @@ def detect_face(face_detector, img, align=True):
     img_region = [0, 0, img.shape[1], img.shape[0]]
 
     img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mtcnn expects RGB but OpenCV read BGR
-    detections = face_detector.detect(img_rgb, landmarks=True)  # returns boundingbox, prob, landmark
+    detections = face_detector.detect(
+        img_rgb, landmarks=True
+    )  # returns boundingbox, prob, landmark
     if len(detections[0]) > 0:
 
         for detection in zip(*detections):
deepface/detectors/MediapipeWrapper.py

@@ -4,7 +4,14 @@ from deepface.detectors import FaceDetector
 
 
 def build_model():
-    import mediapipe as mp  # this is not a must dependency. do not import it in the global level.
+    # this is not a must dependency. do not import it in the global level.
+    try:
+        import mediapipe as mp
+    except ModuleNotFoundError as e:
+        raise ImportError(
+            "MediaPipe is an optional detector, ensure the library is installed."
+            "Please install using 'pip install mediapipe' "
+        ) from e
 
     mp_face_detection = mp.solutions.face_detection
     face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
deepface/detectors/YoloWrapper.py

@@ -20,9 +20,16 @@ def build_model():
     import os
 
     # Import the Ultralytics YOLO model
-    from ultralytics import YOLO
+    try:
+        from ultralytics import YOLO
+    except ModuleNotFoundError as e:
+        raise ImportError(
+            "Yolo is an optional detector, ensure the library is installed. \
+            Please install using 'pip install ultralytics' "
+        ) from e
 
     from deepface.commons.functions import get_deepface_home
 
     weight_path = f"{get_deepface_home()}{PATH}"
 
     # Download the model's weights if they don't exist

@@ -38,8 +45,7 @@ def detect_face(face_detector, img, align=False):
     resp = []
 
     # Detect faces
-    results = face_detector.predict(
-        img, verbose=False, show=False, conf=0.25)[0]
+    results = face_detector.predict(img, verbose=False, show=False, conf=0.25)[0]
 
     # For each face, extract the bounding box, the landmarks and confidence
     for result in results:

@@ -48,7 +54,7 @@ def detect_face(face_detector, img, align=False):
         confidence = result.boxes.conf.tolist()[0]
 
         x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h)
-        detected_face = img[y: y + h, x: x + w].copy()
+        detected_face = img[y : y + h, x : x + w].copy()
 
         if align:
             # Tuple of x,y and confidence for left eye

@@ -57,8 +63,10 @@ def detect_face(face_detector, img, align=False):
         right_eye = result.keypoints.xy[0][1], result.keypoints.conf[0][1]
 
         # Check the landmarks confidence before alignment
-        if (left_eye[1] > LANDMARKS_CONFIDENCE_THRESHOLD and
-                right_eye[1] > LANDMARKS_CONFIDENCE_THRESHOLD):
+        if (
+            left_eye[1] > LANDMARKS_CONFIDENCE_THRESHOLD
+            and right_eye[1] > LANDMARKS_CONFIDENCE_THRESHOLD
+        ):
             detected_face = FaceDetector.alignment_procedure(
                 detected_face, left_eye[0].cpu(), right_eye[0].cpu()
             )
setup.py

@@ -8,7 +8,7 @@ with open("requirements.txt", "r", encoding="utf-8") as f:
 
 setuptools.setup(
     name="deepface",
-    version="0.0.79",
+    version="0.0.80",
     author="Sefik Ilkin Serengil",
     author_email="serengil@gmail.com",
     description="A Lightweight Face Recognition and Facial Attribute Analysis Framework (Age, Gender, Emotion, Race) for Python",
tests/unit_tests.py

@@ -9,7 +9,7 @@ from deepface.commons.logger import Logger
 
 logger = Logger()
 
-# pylint: disable=consider-iterating-dictionary
+# pylint: disable=consider-iterating-dictionary,broad-except
 
 logger.info("-----------------------------------------")
 

@@ -45,7 +45,7 @@ def evaluate(condition):
 # ------------------------------------------------
 
 detectors = ["opencv", "mtcnn"]
-models = ["VGG-Face", "Facenet", "ArcFace"]
+models = ["VGG-Face", "Facenet", "Facenet512", "ArcFace"]
 metrics = ["cosine", "euclidean", "euclidean_l2"]
 
 dataset = [