Merge pull request #907 from serengil/feat-task-0612-logger

feat-task-654-use-custom-logger
This commit is contained in:
Sefik Ilkin Serengil 2023-12-07 09:14:40 +00:00 committed by GitHub
commit 0b22c5482d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 225 additions and 115 deletions

5
Makefile Normal file
View File

@ -0,0 +1,5 @@
# Run the unit test suite from the tests/ directory
# (-s: show stdout, --disable-warnings: silence pytest warning summary)
test:
	cd tests && python -m pytest unit_tests.py -s --disable-warnings
# Lint the deepface package; the build fails unless pylint scores a perfect 10
lint:
	python -m pylint deepface/ --fail-under=10

View File

@ -28,6 +28,9 @@ from deepface.basemodels import (
) )
from deepface.extendedmodels import Age, Gender, Race, Emotion from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst from deepface.commons import functions, realtime, distance as dst
from deepface.commons.logger import Logger
logger = Logger(module="DeepFace")
# ----------------------------------- # -----------------------------------
# configurations for dependencies # configurations for dependencies
@ -340,7 +343,11 @@ def analyze(
if img_content.shape[0] > 0 and img_content.shape[1] > 0: if img_content.shape[0] > 0 and img_content.shape[1] > 0:
obj = {} obj = {}
# facial attribute analysis # facial attribute analysis
pbar = tqdm(range(0, len(actions)), desc="Finding actions", disable=silent) pbar = tqdm(
range(0, len(actions)),
desc="Finding actions",
disable=silent if len(actions) > 1 else True,
)
for index in pbar: for index in pbar:
action = actions[index] action = actions[index]
pbar.set_description(f"Action: {action}") pbar.set_description(f"Action: {action}")
@ -461,17 +468,17 @@ def find(
if path.exists(db_path + "/" + file_name): if path.exists(db_path + "/" + file_name):
if not silent: if not silent:
print( logger.warn(
f"WARNING: Representations for images in {db_path} folder were previously stored" f"Representations for images in {db_path} folder were previously stored"
+ f" in {file_name}. If you added new instances after the creation, then please " f" in {file_name}. If you added new instances after the creation, then please "
+ "delete this file and call find function again. It will create it again." "delete this file and call find function again. It will create it again."
) )
with open(f"{db_path}/{file_name}", "rb") as f: with open(f"{db_path}/{file_name}", "rb") as f:
representations = pickle.load(f) representations = pickle.load(f)
if not silent: if not silent:
print("There are ", len(representations), " representations found in ", file_name) logger.info(f"There are {len(representations)} representations found in {file_name}")
else: # create representation.pkl from scratch else: # create representation.pkl from scratch
employees = [] employees = []
@ -539,7 +546,7 @@ def find(
pickle.dump(representations, f) pickle.dump(representations, f)
if not silent: if not silent:
print( logger.info(
f"Representations stored in {db_path}/{file_name} file." f"Representations stored in {db_path}/{file_name} file."
+ "Please delete this file when you add new identities in your database." + "Please delete this file when you add new identities in your database."
) )
@ -614,7 +621,7 @@ def find(
toc = time.time() toc = time.time()
if not silent: if not silent:
print("find function lasts ", toc - tic, " seconds") logger.info(f"find function lasts {toc - tic} seconds")
return resp_obj return resp_obj
@ -869,7 +876,7 @@ def detectFace(
detected and aligned face as numpy array detected and aligned face as numpy array
""" """
print("⚠️ Function detectFace is deprecated. Use extract_faces instead.") logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
face_objs = extract_faces( face_objs = extract_faces(
img_path=img_path, img_path=img_path,
target_size=target_size, target_size=target_size,

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.ArcFace")
# pylint: disable=unsubscriptable-object # pylint: disable=unsubscriptable-object
@ -71,7 +74,7 @@ def loadModel(
if os.path.isfile(output) != True: if os.path.isfile(output) != True:
print(file_name, " will be downloaded to ", output) logger.info(f"{file_name} will be downloaded to {output}")
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)
# --------------------------------------- # ---------------------------------------

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.DeepID")
tf_version = int(tf.__version__.split(".", maxsplit=1)[0]) tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
@ -71,7 +74,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5") != True:
print("deepid_keras_weights.h5 will be downloaded...") logger.info("deepid_keras_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/deepid_keras_weights.h5" output = home + "/.deepface/weights/deepid_keras_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -3,6 +3,9 @@ import bz2
import gdown import gdown
import numpy as np import numpy as np
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.DlibResNet")
# pylint: disable=too-few-public-methods # pylint: disable=too-few-public-methods
@ -24,7 +27,7 @@ class DlibResNet:
# download pre-trained model if it does not exist # download pre-trained model if it does not exist
if os.path.isfile(weight_file) != True: if os.path.isfile(weight_file) != True:
print("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded") logger.info("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2" file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2"
url = f"http://dlib.net/files/{file_name}" url = f"http://dlib.net/files/{file_name}"

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.Facenet")
# -------------------------------- # --------------------------------
# dependency configuration # dependency configuration
@ -1628,7 +1631,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/facenet_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/facenet_weights.h5") != True:
print("facenet_weights.h5 will be downloaded...") logger.info("facenet_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/facenet_weights.h5" output = home + "/.deepface/weights/facenet_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -2,7 +2,9 @@ import os
import gdown import gdown
from deepface.basemodels import Facenet from deepface.basemodels import Facenet
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.Facenet512")
def loadModel( def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5", url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
@ -15,7 +17,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5") != True:
print("facenet512_weights.h5 will be downloaded...") logger.info("facenet512_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/facenet512_weights.h5" output = home + "/.deepface/weights/facenet512_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -3,6 +3,9 @@ import zipfile
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.FbDeepFace")
# -------------------------------- # --------------------------------
# dependency configuration # dependency configuration
@ -57,7 +60,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5") != True: if os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5") != True:
print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...") logger.info("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip" output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.OpenFace")
tf_version = int(tf.__version__.split(".", maxsplit=1)[0]) tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1: if tf_version == 1:
@ -362,7 +365,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/openface_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/openface_weights.h5") != True:
print("openface_weights.h5 will be downloaded...") logger.info("openface_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/openface_weights.h5" output = home + "/.deepface/weights/openface_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -4,6 +4,9 @@ import cv2 as cv
import gdown import gdown
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.SFace")
# pylint: disable=line-too-long, too-few-public-methods # pylint: disable=line-too-long, too-few-public-methods
@ -44,7 +47,7 @@ def load_model(
if not os.path.isfile(file_name): if not os.path.isfile(file_name):
print("sface weights will be downloaded...") logger.info("sface weights will be downloaded...")
gdown.download(url, file_name, quiet=False) gdown.download(url, file_name, quiet=False)

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="basemodels.VGGFace")
# --------------------------------------- # ---------------------------------------
@ -95,7 +98,7 @@ def loadModel(
output = home + "/.deepface/weights/vgg_face_weights.h5" output = home + "/.deepface/weights/vgg_face_weights.h5"
if os.path.isfile(output) != True: if os.path.isfile(output) != True:
print("vgg_face_weights.h5 will be downloaded...") logger.info("vgg_face_weights.h5 will be downloaded...")
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)
# ----------------------------------- # -----------------------------------

View File

@ -12,7 +12,9 @@ from deprecated import deprecated
# package dependencies # package dependencies
from deepface.detectors import FaceDetector from deepface.detectors import FaceDetector
from deepface.commons.logger import Logger
logger = Logger(module="commons.functions")
# -------------------------------------------------- # --------------------------------------------------
# configurations of dependencies # configurations of dependencies
@ -41,11 +43,11 @@ def initialize_folder():
if not os.path.exists(deepFaceHomePath): if not os.path.exists(deepFaceHomePath):
os.makedirs(deepFaceHomePath, exist_ok=True) os.makedirs(deepFaceHomePath, exist_ok=True)
print("Directory ", home, "/.deepface created") logger.info(f"Directory {home}/.deepface created")
if not os.path.exists(weightsPath): if not os.path.exists(weightsPath):
os.makedirs(weightsPath, exist_ok=True) os.makedirs(weightsPath, exist_ok=True)
print("Directory ", home, "/.deepface/weights created") logger.info(f"Directory {home}/.deepface/weights created")
def get_deepface_home(): def get_deepface_home():
@ -115,6 +117,7 @@ def load_image(img):
# This causes troubles when reading files with non english names # This causes troubles when reading files with non english names
# return cv2.imread(img) # return cv2.imread(img)
# -------------------------------------------------- # --------------------------------------------------
@ -357,7 +360,7 @@ def preprocess_face(
Deprecated: Deprecated:
0.0.78: Use extract_faces instead of preprocess_face. 0.0.78: Use extract_faces instead of preprocess_face.
""" """
print("⚠️ Function preprocess_face is deprecated. Use extract_faces instead.") logger.warn("Function preprocess_face is deprecated. Use extract_faces instead.")
result = None result = None
img_objs = extract_faces( img_objs = extract_faces(
img=img, img=img,

View File

@ -0,0 +1,39 @@
import os
import logging
# pylint: disable=broad-except
class Logger:
    """Minimal stdout logger with emoji-tagged severity levels.

    The verbosity threshold is read once, at construction time, from the
    ``DEEPFACE_LOG_LEVEL`` environment variable. The value is expected to be
    an integer matching the stdlib ``logging`` level constants (e.g. ``10``
    for DEBUG, ``20`` for INFO); it defaults to ``logging.INFO`` and falls
    back to ``logging.INFO`` when the variable cannot be parsed.
    """

    def __init__(self, module=None):
        """Create a logger.

        Args:
            module: optional module name kept for context
                (not currently included in the emitted output).
        """
        self.module = module
        log_level = os.environ.get("DEEPFACE_LOG_LEVEL", str(logging.INFO))
        try:
            self.log_level = int(log_level)
        except Exception as err:
            # Bug fix: the two concatenated segments previously rendered as
            # "…$DEEPFACE_LOG_LEVEL.Expected int…" (no separating space);
            # also the first segment was an f-string with no interpolation.
            self.dump_log(
                "Exception while parsing $DEEPFACE_LOG_LEVEL. "
                f"Expected int but it is {log_level} ({str(err)})"
            )
            # Malformed value: degrade gracefully to the default threshold.
            self.log_level = logging.INFO

    def info(self, message):
        """Emit *message* when the threshold allows INFO or lower."""
        if self.log_level <= logging.INFO:
            self.dump_log(message)

    def debug(self, message):
        """Emit *message* (spider-tagged) when the threshold allows DEBUG."""
        if self.log_level <= logging.DEBUG:
            self.dump_log(f"🕷️ {message}")

    def warn(self, message):
        """Emit *message* (warning-tagged) when the threshold allows WARNING or lower."""
        if self.log_level <= logging.WARNING:
            self.dump_log(f"⚠️ {message}")

    def error(self, message):
        """Emit *message* (red-circle-tagged) when the threshold allows ERROR or lower."""
        if self.log_level <= logging.ERROR:
            self.dump_log(f"🔴 {message}")

    def critical(self, message):
        """Emit *message* (explosion-tagged) when the threshold allows CRITICAL or lower."""
        if self.log_level <= logging.CRITICAL:
            self.dump_log(f"💥 {message}")

    def dump_log(self, message):
        """Single output choke point; plain print keeps dependencies minimal."""
        print(message)

View File

@ -5,6 +5,9 @@ import pandas as pd
import cv2 import cv2
from deepface import DeepFace from deepface import DeepFace
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="commons.realtime")
# dependency configuration # dependency configuration
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
@ -35,15 +38,15 @@ def analysis(
# build models once to store them in the memory # build models once to store them in the memory
# otherwise, they will be built after cam started and this will cause delays # otherwise, they will be built after cam started and this will cause delays
DeepFace.build_model(model_name=model_name) DeepFace.build_model(model_name=model_name)
print(f"facial recognition model {model_name} is just built") logger.info(f"facial recognition model {model_name} is just built")
if enable_face_analysis: if enable_face_analysis:
DeepFace.build_model(model_name="Age") DeepFace.build_model(model_name="Age")
print("Age model is just built") logger.info("Age model is just built")
DeepFace.build_model(model_name="Gender") DeepFace.build_model(model_name="Gender")
print("Gender model is just built") logger.info("Gender model is just built")
DeepFace.build_model(model_name="Emotion") DeepFace.build_model(model_name="Emotion")
print("Emotion model is just built") logger.info("Emotion model is just built")
# ----------------------- # -----------------------
# call a dummy find function for db_path once to create embeddings in the initialization # call a dummy find function for db_path once to create embeddings in the initialization
DeepFace.find( DeepFace.find(
@ -300,7 +303,7 @@ def analysis(
apparent_age = demography["age"] apparent_age = demography["age"]
dominant_gender = demography["dominant_gender"] dominant_gender = demography["dominant_gender"]
gender = "M" if dominant_gender == "Man" else "W" gender = "M" if dominant_gender == "Man" else "W"
# print(f"{apparent_age} years old {dominant_emotion}") logger.debug(f"{apparent_age} years old {dominant_gender}")
analysis_report = str(int(apparent_age)) + " " + gender analysis_report = str(int(apparent_age)) + " " + gender
# ------------------------------- # -------------------------------
@ -675,7 +678,7 @@ def analysis(
1, 1,
) )
except Exception as err: # pylint: disable=broad-except except Exception as err: # pylint: disable=broad-except
print(str(err)) logger.error(str(err))
tic = time.time() # in this way, freezed image can show 5 seconds tic = time.time() # in this way, freezed image can show 5 seconds

View File

@ -2,7 +2,9 @@ import os
import bz2 import bz2
import gdown import gdown
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="detectors.DlibWrapper")
def build_model(): def build_model():
@ -14,7 +16,7 @@ def build_model():
if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True: if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True:
file_name = "shape_predictor_5_face_landmarks.dat.bz2" file_name = "shape_predictor_5_face_landmarks.dat.bz2"
print(f"{file_name} is going to be downloaded") logger.info(f"{file_name} is going to be downloaded")
url = f"http://dlib.net/files/{file_name}" url = f"http://dlib.net/files/{file_name}"
output = f"{home}/.deepface/weights/{file_name}" output = f"{home}/.deepface/weights/{file_name}"

View File

@ -4,6 +4,9 @@ import cv2
import pandas as pd import pandas as pd
from deepface.detectors import OpenCvWrapper from deepface.detectors import OpenCvWrapper
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="detectors.SsdWrapper")
# pylint: disable=line-too-long # pylint: disable=line-too-long
@ -15,7 +18,7 @@ def build_model():
# model structure # model structure
if os.path.isfile(home + "/.deepface/weights/deploy.prototxt") != True: if os.path.isfile(home + "/.deepface/weights/deploy.prototxt") != True:
print("deploy.prototxt will be downloaded...") logger.info("deploy.prototxt will be downloaded...")
url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt" url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
@ -26,7 +29,7 @@ def build_model():
# pre-trained weights # pre-trained weights
if os.path.isfile(home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel") != True: if os.path.isfile(home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel") != True:
print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...") logger.info("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")
url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel" url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"

View File

@ -1,4 +1,7 @@
from deepface.detectors import FaceDetector from deepface.detectors import FaceDetector
from deepface.commons.logger import Logger
logger = Logger()
# Model's weights paths # Model's weights paths
PATH = "/.deepface/weights/yolov8n-face.pt" PATH = "/.deepface/weights/yolov8n-face.pt"
@ -25,7 +28,7 @@ def build_model():
# Download the model's weights if they don't exist # Download the model's weights if they don't exist
if not os.path.isfile(weight_path): if not os.path.isfile(weight_path):
gdown.download(WEIGHT_URL, weight_path, quiet=False) gdown.download(WEIGHT_URL, weight_path, quiet=False)
print(f"Downloaded YOLO model {os.path.basename(weight_path)}") logger.info(f"Downloaded YOLO model {os.path.basename(weight_path)}")
# Return face_detector # Return face_detector
return YOLO(weight_path) return YOLO(weight_path)

View File

@ -3,7 +3,9 @@ import cv2
import gdown import gdown
from deepface.detectors import FaceDetector from deepface.detectors import FaceDetector
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="detectors.YunetWrapper")
def build_model(): def build_model():
# pylint: disable=C0301 # pylint: disable=C0301
@ -11,7 +13,7 @@ def build_model():
file_name = "face_detection_yunet_2023mar.onnx" file_name = "face_detection_yunet_2023mar.onnx"
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + f"/.deepface/weights/{file_name}") is False: if os.path.isfile(home + f"/.deepface/weights/{file_name}") is False:
print(f"{file_name} will be downloaded...") logger.info(f"{file_name} will be downloaded...")
output = home + f"/.deepface/weights/{file_name}" output = home + f"/.deepface/weights/{file_name}"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)
face_detector = cv2.FaceDetectorYN_create(home + f"/.deepface/weights/{file_name}", "", (0, 0)) face_detector = cv2.FaceDetectorYN_create(home + f"/.deepface/weights/{file_name}", "", (0, 0))

View File

@ -4,6 +4,9 @@ import numpy as np
import tensorflow as tf import tensorflow as tf
from deepface.basemodels import VGGFace from deepface.basemodels import VGGFace
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="extendedmodels.Age")
# ---------------------------------------- # ----------------------------------------
# dependency configurations # dependency configurations
@ -45,7 +48,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/age_model_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/age_model_weights.h5") != True:
print("age_model_weights.h5 will be downloaded...") logger.info("age_model_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/age_model_weights.h5" output = home + "/.deepface/weights/age_model_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -2,6 +2,9 @@ import os
import gdown import gdown
import tensorflow as tf import tensorflow as tf
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="extendedmodels.Emotion")
# ------------------------------------------- # -------------------------------------------
# pylint: disable=line-too-long # pylint: disable=line-too-long
@ -65,7 +68,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/facial_expression_model_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/facial_expression_model_weights.h5") != True:
print("facial_expression_model_weights.h5 will be downloaded...") logger.info("facial_expression_model_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/facial_expression_model_weights.h5" output = home + "/.deepface/weights/facial_expression_model_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -3,6 +3,9 @@ import gdown
import tensorflow as tf import tensorflow as tf
from deepface.basemodels import VGGFace from deepface.basemodels import VGGFace
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="extendedmodels.Gender")
# ------------------------------------- # -------------------------------------
# pylint: disable=line-too-long # pylint: disable=line-too-long
@ -48,7 +51,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/gender_model_weights.h5") != True: if os.path.isfile(home + "/.deepface/weights/gender_model_weights.h5") != True:
print("gender_model_weights.h5 will be downloaded...") logger.info("gender_model_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/gender_model_weights.h5" output = home + "/.deepface/weights/gender_model_weights.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -3,6 +3,9 @@ import gdown
import tensorflow as tf import tensorflow as tf
from deepface.basemodels import VGGFace from deepface.basemodels import VGGFace
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger(module="extendedmodels.Race")
# -------------------------- # --------------------------
# pylint: disable=line-too-long # pylint: disable=line-too-long
@ -46,7 +49,7 @@ def loadModel(
home = functions.get_deepface_home() home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/race_model_single_batch.h5") != True: if os.path.isfile(home + "/.deepface/weights/race_model_single_batch.h5") != True:
print("race_model_single_batch.h5 will be downloaded...") logger.info("race_model_single_batch.h5 will be downloaded...")
output = home + "/.deepface/weights/race_model_single_batch.h5" output = home + "/.deepface/weights/race_model_single_batch.h5"
gdown.download(url, output, quiet=False) gdown.download(url, output, quiet=False)

View File

@ -2,6 +2,9 @@ import matplotlib.pyplot as plt
import numpy as np import numpy as np
from deepface import DeepFace from deepface import DeepFace
from deepface.commons import functions from deepface.commons import functions
from deepface.commons.logger import Logger
logger = Logger()
# ---------------------------------------------- # ----------------------------------------------
# build face recognition model # build face recognition model
@ -12,7 +15,7 @@ model = DeepFace.build_model(model_name=model_name)
target_size = functions.find_target_size(model_name) target_size = functions.find_target_size(model_name)
print(f"target_size: {target_size}") logger.info(f"target_size: {target_size}")
# ---------------------------------------------- # ----------------------------------------------
# load images and find embeddings # load images and find embeddings
@ -29,10 +32,10 @@ img2_representation = model.predict(img2)[0, :]
# distance between two images # distance between two images
distance_vector = np.square(img1_representation - img2_representation) distance_vector = np.square(img1_representation - img2_representation)
# print(distance_vector) logger.debug(distance_vector)
distance = np.sqrt(distance_vector.sum()) distance = np.sqrt(distance_vector.sum())
print("Euclidean distance: ", distance) logger.info(f"Euclidean distance: {distance}")
# ---------------------------------------------- # ----------------------------------------------
# expand vectors to be shown better in graph # expand vectors to be shown better in graph

View File

@ -5,10 +5,13 @@ import numpy as np
import pandas as pd import pandas as pd
import cv2 import cv2
from deepface import DeepFace from deepface import DeepFace
from deepface.commons.logger import Logger
logger = Logger()
# pylint: disable=consider-iterating-dictionary # pylint: disable=consider-iterating-dictionary
print("-----------------------------------------") logger.info("-----------------------------------------")
warnings.filterwarnings("ignore") warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
@ -20,9 +23,9 @@ if tf_major_version == 2:
tf.get_logger().setLevel(logging.ERROR) tf.get_logger().setLevel(logging.ERROR)
print("Running unit tests for TF ", tf.__version__) logger.info("Running unit tests for TF " + tf.__version__)
print("-----------------------------------------") logger.info("-----------------------------------------")
expected_coverage = 97 expected_coverage = 97
num_cases = 0 num_cases = 0
@ -58,12 +61,12 @@ dataset = [
["dataset/img6.jpg", "dataset/img9.jpg", False], ["dataset/img6.jpg", "dataset/img9.jpg", False],
] ]
print("-----------------------------------------") logger.info("-----------------------------------------")
def test_cases(): def test_cases():
print("Enforce detection test") logger.info("Enforce detection test")
black_img = np.zeros([224, 224, 3]) black_img = np.zeros([224, 224, 3])
# enforce detection on for represent # enforce detection on for represent
@ -96,7 +99,7 @@ def test_cases():
assert isinstance(objs[0]["embedding"], list) assert isinstance(objs[0]["embedding"], list)
assert len(objs[0]["embedding"]) == 2622 # embedding of VGG-Face assert len(objs[0]["embedding"]) == 2622 # embedding of VGG-Face
except Exception as err: except Exception as err:
print(f"Unexpected exception thrown: {str(err)}") logger.error(f"Unexpected exception thrown: {str(err)}")
exception_thrown = True exception_thrown = True
assert exception_thrown is False assert exception_thrown is False
@ -118,15 +121,15 @@ def test_cases():
assert isinstance(obj, dict) assert isinstance(obj, dict)
exception_thrown = False exception_thrown = False
except Exception as err: except Exception as err:
print(f"Unexpected exception thrown: {str(err)}") logger.error(f"Unexpected exception thrown: {str(err)}")
exception_thrown = True exception_thrown = True
assert exception_thrown is False assert exception_thrown is False
# ------------------------------------------- # -------------------------------------------
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Extract faces test") logger.info("Extract faces test")
for detector in detectors: for detector in detectors:
img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector) img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
@ -142,23 +145,23 @@ def test_cases():
img = img_obj["face"] img = img_obj["face"]
evaluate(img.shape[0] > 0 and img.shape[1] > 0) evaluate(img.shape[0] > 0 and img.shape[1] > 0)
print(detector, " test is done") logger.info(f"{detector} test is done")
print("-----------------------------------------") logger.info("-----------------------------------------")
img_path = "dataset/img1.jpg" img_path = "dataset/img1.jpg"
embedding_objs = DeepFace.represent(img_path) embedding_objs = DeepFace.represent(img_path)
for embedding_obj in embedding_objs: for embedding_obj in embedding_objs:
embedding = embedding_obj["embedding"] embedding = embedding_obj["embedding"]
print("Function returned ", len(embedding), "dimensional vector") logger.info(f"Function returned {len(embedding)} dimensional vector")
evaluate(len(embedding) == 2622) evaluate(len(embedding) == 2622)
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Different face detectors on verification test") logger.info("Different face detectors on verification test")
for detector in detectors: for detector in detectors:
print(detector + " detector") logger.info(detector + " detector")
res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend=detector) res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend=detector)
assert isinstance(res, dict) assert isinstance(res, dict)
@ -181,67 +184,72 @@ def test_cases():
assert "w" in res["facial_areas"]["img2"].keys() assert "w" in res["facial_areas"]["img2"].keys()
assert "h" in res["facial_areas"]["img2"].keys() assert "h" in res["facial_areas"]["img2"].keys()
print(res) logger.info(res)
evaluate(res["verified"] == dataset[0][2]) evaluate(res["verified"] == dataset[0][2])
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Find function test") logger.info("Find function test")
dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset") dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
for df in dfs: for df in dfs:
assert isinstance(df, pd.DataFrame) assert isinstance(df, pd.DataFrame)
print(df.head()) logger.info(df.head())
evaluate(df.shape[0] > 0) evaluate(df.shape[0] > 0)
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Facial analysis test. Passing nothing as an action") logger.info("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg" img = "dataset/img4.jpg"
demography_objs = DeepFace.analyze(img) demography_objs = DeepFace.analyze(img)
for demography in demography_objs: for demography in demography_objs:
print(demography) logger.info(demography)
evaluate(demography["age"] > 20 and demography["age"] < 40) evaluate(demography["age"] > 20 and demography["age"] < 40)
evaluate(demography["dominant_gender"] == "Woman") evaluate(demography["dominant_gender"] == "Woman")
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Facial analysis test. Passing all to the action") logger.info("Facial analysis test. Passing all to the action")
demography_objs = DeepFace.analyze(img, ["age", "gender", "race", "emotion"]) demography_objs = DeepFace.analyze(img, ["age", "gender", "race", "emotion"])
for demography in demography_objs: for demography in demography_objs:
# print(f"Demography: {demography}") logger.debug(f"Demography: {demography}")
# check response is a valid json # check response is a valid json
print("Age: ", demography["age"]) age = demography["age"]
print("Gender: ", demography["dominant_gender"]) gender = demography["dominant_gender"]
print("Race: ", demography["dominant_race"]) race = demography["dominant_race"]
print("Emotion: ", demography["dominant_emotion"]) emotion = demography["dominant_emotion"]
logger.info(f"Age: {age}")
logger.info(f"Gender: {gender}")
logger.info(f"Race: {race}")
logger.info(f"Emotion: {emotion}")
evaluate(demography.get("age") is not None) evaluate(demography.get("age") is not None)
evaluate(demography.get("dominant_gender") is not None) evaluate(demography.get("dominant_gender") is not None)
evaluate(demography.get("dominant_race") is not None) evaluate(demography.get("dominant_race") is not None)
evaluate(demography.get("dominant_emotion") is not None) evaluate(demography.get("dominant_emotion") is not None)
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Facial analysis test 2. Remove some actions and check they are not computed") logger.info("Facial analysis test 2. Remove some actions and check they are not computed")
demography_objs = DeepFace.analyze(img, ["age", "gender"]) demography_objs = DeepFace.analyze(img, ["age", "gender"])
for demography in demography_objs: for demography in demography_objs:
print("Age: ", demography.get("age")) age = demography["age"]
print("Gender: ", demography.get("dominant_gender")) gender = demography["dominant_gender"]
print("Race: ", demography.get("dominant_race"))
print("Emotion: ", demography.get("dominant_emotion")) logger.info(f"Age: { age }")
logger.info(f"Gender: {gender}")
evaluate(demography.get("age") is not None) evaluate(demography.get("age") is not None)
evaluate(demography.get("dominant_gender") is not None) evaluate(demography.get("dominant_gender") is not None)
evaluate(demography.get("dominant_race") is None) evaluate(demography.get("dominant_race") is None)
evaluate(demography.get("dominant_emotion") is None) evaluate(demography.get("dominant_emotion") is None)
print("-----------------------------------------") logger.info("-----------------------------------------")
print("Facial recognition tests") logger.info("Facial recognition tests")
for model in models: for model in models:
for metric in metrics: for metric in metrics:
@ -270,64 +278,55 @@ def test_cases():
else: else:
classified_label = "unverified" classified_label = "unverified"
print( img1_alias = img1.split("/", maxsplit=1)[-1]
img1.split("/", maxsplit=1)[-1], img2_alias = img2.split("/", maxsplit=1)[-1]
"-",
img2.split("/", maxsplit=1)[-1], logger.info(
classified_label, f"{img1_alias} - {img2_alias}"
"as same person based on", f". {classified_label} as same person based on {model} and {metric}"
model, f". Distance: {distance}, Threshold:{threshold} ({test_result_label})",
"and",
metric,
". Distance:",
distance,
", Threshold:",
threshold,
"(",
test_result_label,
")",
) )
print("--------------------------") logger.info("--------------------------")
# ----------------------------------------- # -----------------------------------------
print("Passing numpy array to analyze function") logger.info("Passing numpy array to analyze function")
img = cv2.imread("dataset/img1.jpg") img = cv2.imread("dataset/img1.jpg")
resp_objs = DeepFace.analyze(img) resp_objs = DeepFace.analyze(img)
for resp_obj in resp_objs: for resp_obj in resp_objs:
print(resp_obj) logger.info(resp_obj)
evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40) evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
evaluate(resp_obj["gender"] == "Woman") evaluate(resp_obj["gender"] == "Woman")
print("--------------------------") logger.info("--------------------------")
print("Passing numpy array to verify function") logger.info("Passing numpy array to verify function")
img1 = cv2.imread("dataset/img1.jpg") img1 = cv2.imread("dataset/img1.jpg")
img2 = cv2.imread("dataset/img2.jpg") img2 = cv2.imread("dataset/img2.jpg")
res = DeepFace.verify(img1, img2) res = DeepFace.verify(img1, img2)
print(res) logger.info(res)
evaluate(res["verified"] == True) evaluate(res["verified"] == True)
print("--------------------------") logger.info("--------------------------")
print("Passing numpy array to find function") logger.info("Passing numpy array to find function")
img1 = cv2.imread("dataset/img1.jpg") img1 = cv2.imread("dataset/img1.jpg")
dfs = DeepFace.find(img1, db_path="dataset") dfs = DeepFace.find(img1, db_path="dataset")
for df in dfs: for df in dfs:
print(df.head()) logger.info(df.head())
evaluate(df.shape[0] > 0) evaluate(df.shape[0] > 0)
print("--------------------------") logger.info("--------------------------")
print("non-binary gender tests") logger.info("non-binary gender tests")
# interface validation - no need to call evaluate here # interface validation - no need to call evaluate here
@ -338,7 +337,7 @@ def test_cases():
) )
for result in results: for result in results:
print(result) logger.info(result)
assert "gender" in result.keys() assert "gender" in result.keys()
assert "dominant_gender" in result.keys() and result["dominant_gender"] in [ assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
@ -356,16 +355,16 @@ def test_cases():
test_cases() test_cases()
print("num of test cases run: " + str(num_cases)) logger.info("num of test cases run: " + str(num_cases))
print("succeeded test cases: " + str(succeed_cases)) logger.info("succeeded test cases: " + str(succeed_cases))
test_score = (100 * succeed_cases) / num_cases test_score = (100 * succeed_cases) / num_cases
print("test coverage: " + str(test_score)) logger.info("test coverage: " + str(test_score))
if test_score > expected_coverage: if test_score > expected_coverage:
print("well done! min required test coverage is satisfied") logger.info("well done! min required test coverage is satisfied")
else: else:
print("min required test coverage is NOT satisfied") logger.info("min required test coverage is NOT satisfied")
assert test_score > expected_coverage assert test_score > expected_coverage

View File

@ -1,5 +1,8 @@
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from deepface import DeepFace from deepface import DeepFace
from deepface.commons.logger import Logger
logger = Logger()
model_names = [ model_names = [
"VGG-Face", "VGG-Face",
@ -19,22 +22,22 @@ for model_name in model_names:
obj = DeepFace.verify( obj = DeepFace.verify(
img1_path="dataset/img1.jpg", img2_path="dataset/img2.jpg", model_name=model_name img1_path="dataset/img1.jpg", img2_path="dataset/img2.jpg", model_name=model_name
) )
print(obj) logger.info(obj)
print("---------------------") logger.info("---------------------")
# represent # represent
for model_name in model_names: for model_name in model_names:
embedding_objs = DeepFace.represent(img_path="dataset/img1.jpg", model_name=model_name) embedding_objs = DeepFace.represent(img_path="dataset/img1.jpg", model_name=model_name)
for embedding_obj in embedding_objs: for embedding_obj in embedding_objs:
embedding = embedding_obj["embedding"] embedding = embedding_obj["embedding"]
print(f"{model_name} produced {len(embedding)}D vector") logger.info(f"{model_name} produced {len(embedding)}D vector")
# find # find
dfs = DeepFace.find( dfs = DeepFace.find(
img_path="dataset/img1.jpg", db_path="dataset", model_name="Facenet", detector_backend="mtcnn" img_path="dataset/img1.jpg", db_path="dataset", model_name="Facenet", detector_backend="mtcnn"
) )
for df in dfs: for df in dfs:
print(df) logger.info(df)
# extract faces # extract faces
for detector_backend in detector_backends: for detector_backend in detector_backends:
@ -43,8 +46,8 @@ for detector_backend in detector_backends:
) )
for face_obj in face_objs: for face_obj in face_objs:
face = face_obj["face"] face = face_obj["face"]
print(detector_backend) logger.info(detector_backend)
plt.imshow(face) plt.imshow(face)
plt.axis("off") plt.axis("off")
plt.show() plt.show()
print("-----------") logger.info("-----------")