Merge pull request #932 from serengil/feat-task-2312-unit-tests

Arranging Unit Tests
Sefik Ilkin Serengil 2023-12-24 13:14:11 +00:00 committed by GitHub
commit 58945bd14d
11 changed files with 375 additions and 407 deletions

GitHub Actions workflow (unit-tests job)

@@ -42,7 +42,7 @@ jobs:
       - name: Test with pytest
         run: |
           cd tests
-          pytest unit_tests.py
+          python -m pytest . -s --disable-warnings

   linting:
     needs: unit-tests

Makefile

@@ -1,5 +1,5 @@
 test:
-	cd tests && python -m pytest unit_tests.py -s --disable-warnings
+	cd tests && python -m pytest . -s --disable-warnings
 lint:
 	python -m pylint deepface/ --fail-under=10
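Both the CI step and the make target now rely on pytest's standard discovery: every test_*.py module under tests/ is collected automatically, so the new test files introduced below need no extra wiring. For local debugging, the same run can also be started from Python through pytest's documented pytest.main entry point — a minimal sketch, not part of this commit:

import sys
import pytest

# roughly equivalent to `python -m pytest . -s --disable-warnings` run inside tests/,
# here invoked from the repository root against the tests/ directory
exit_code = pytest.main(["-s", "--disable-warnings", "tests/"])
sys.exit(exit_code)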

deepface/DeepFace.py

@@ -31,7 +31,7 @@ from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.commons import functions, realtime, distance as dst
 from deepface.commons.logger import Logger

-# pylint: disable=no-else-raise
+# pylint: disable=no-else-raise, simplifiable-if-expression

 logger = Logger(module="DeepFace")
@@ -221,7 +221,7 @@ def verify(
     toc = time.time()

     resp_obj = {
-        "verified": distance <= threshold,
+        "verified": True if distance <= threshold else False,
         "distance": distance,
         "threshold": threshold,
         "model": model_name,
@@ -236,7 +236,7 @@
 def analyze(
     img_path: Union[str, np.ndarray],
-    actions: Tuple[str, ...] = ("emotion", "age", "gender", "race"),
+    actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
     enforce_detection: bool = True,
     detector_backend: str = "opencv",
     align: bool = True,
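Widening actions from Tuple[str, ...] to Union[tuple, list] lets callers pass either container; the new tests/test_analyze.py below exercises both forms. A quick illustration, reusing an image path from the test fixtures:

from deepface import DeepFace

# both container types are now accepted for `actions`
as_tuple = DeepFace.analyze("dataset/img4.jpg", actions=("age", "gender"), silent=True)
as_list = DeepFace.analyze("dataset/img4.jpg", actions=["age", "gender"], silent=True)
assert as_tuple[0].keys() == as_list[0].keys()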
@@ -414,14 +414,14 @@
 def find(
     img_path: Union[str, np.ndarray],
-    db_path : str,
-    model_name : str ="VGG-Face",
-    distance_metric : str ="cosine",
-    enforce_detection : bool =True,
-    detector_backend : str ="opencv",
-    align : bool = True,
-    normalization : str ="base",
-    silent : bool = False,
+    db_path: str,
+    model_name: str = "VGG-Face",
+    distance_metric: str = "cosine",
+    enforce_detection: bool = True,
+    detector_backend: str = "opencv",
+    align: bool = True,
+    normalization: str = "base",
+    silent: bool = False,
 ) -> List[pd.DataFrame]:
     """
     This function applies verification several times and find the identities in a database
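Apart from whitespace normalization in the signature, find is unchanged: it returns one pandas.DataFrame per face detected in img_path, as the return annotation states. A minimal usage sketch (the identity column follows DeepFace's documented output; treat the rest as an assumption):

from deepface import DeepFace

dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset", silent=True)
for df in dfs:  # one DataFrame per face detected in img_path
    # each row is a candidate match from db_path
    print(df["identity"].head())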

deepface/commons/functions.py

@@ -104,9 +104,7 @@ def load_image(img):
     # The image is a url
     if img.startswith("http"):
         return (
-            np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("BGR"))[
-                :, :, ::-1
-            ],
+            np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("BGR")),
             # return url as image name
             img,
         )
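For context, this branch fetches an image over HTTP and hands it to OpenCV-style consumers, which expect BGR channel order. A standalone sketch of that idea via the conventional RGB-then-flip route (an illustration, not the repository's code):

import numpy as np
import requests
from PIL import Image

def fetch_bgr_image(url: str) -> np.ndarray:
    # decode with PIL as RGB, then reverse the channel axis to get the
    # BGR order that OpenCV functions expect
    response = requests.get(url, stream=True, timeout=60)
    rgb = np.array(Image.open(response.raw).convert("RGB"))
    return rgb[:, :, ::-1]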

tests/test_analyze.py (new file, 133 lines)

@@ -0,0 +1,133 @@

import cv2

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_analyze.py")

detectors = ["opencv", "mtcnn"]


def test_standard_analyze():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img, silent=True)
    for demography in demography_objs:
        logger.debug(demography)
        assert demography["age"] > 20 and demography["age"] < 40
        assert demography["dominant_gender"] == "Woman"
    logger.info("✅ test standard analyze done")


def test_analyze_with_all_actions_as_tuple():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(
        img, actions=("age", "gender", "race", "emotion"), silent=True
    )
    for demography in demography_objs:
        logger.debug(f"Demography: {demography}")
        age = demography["age"]
        gender = demography["dominant_gender"]
        race = demography["dominant_race"]
        emotion = demography["dominant_emotion"]
        logger.debug(f"Age: {age}")
        logger.debug(f"Gender: {gender}")
        logger.debug(f"Race: {race}")
        logger.debug(f"Emotion: {emotion}")

        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None
        assert demography.get("dominant_race") is not None
        assert demography.get("dominant_emotion") is not None

    logger.info("✅ test analyze for all actions as tuple done")


def test_analyze_with_all_actions_as_list():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(
        img, actions=["age", "gender", "race", "emotion"], silent=True
    )
    for demography in demography_objs:
        logger.debug(f"Demography: {demography}")
        age = demography["age"]
        gender = demography["dominant_gender"]
        race = demography["dominant_race"]
        emotion = demography["dominant_emotion"]
        logger.debug(f"Age: {age}")
        logger.debug(f"Gender: {gender}")
        logger.debug(f"Race: {race}")
        logger.debug(f"Emotion: {emotion}")

        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None
        assert demography.get("dominant_race") is not None
        assert demography.get("dominant_emotion") is not None

    logger.info("✅ test analyze for all actions as array done")


def test_analyze_for_some_actions():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img, ["age", "gender"], silent=True)
    for demography in demography_objs:
        age = demography["age"]
        gender = demography["dominant_gender"]

        logger.debug(f"Age: { age }")
        logger.debug(f"Gender: {gender}")

        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None

        # these are not in actions
        assert demography.get("dominant_race") is None
        assert demography.get("dominant_emotion") is None

    logger.info("✅ test analyze for some actions done")


def test_analyze_for_preloaded_image():
    img = cv2.imread("dataset/img1.jpg")
    resp_objs = DeepFace.analyze(img, silent=True)
    for resp_obj in resp_objs:
        logger.debug(resp_obj)
        assert resp_obj["age"] > 20 and resp_obj["age"] < 40
        assert resp_obj["dominant_gender"] == "Woman"
    logger.info("✅ test analyze for pre-loaded image done")


def test_analyze_for_different_detectors():
    img_paths = [
        "dataset/img1.jpg",
        "dataset/img5.jpg",
        "dataset/img6.jpg",
        "dataset/img8.jpg",
        "dataset/img1.jpg",
        "dataset/img2.jpg",
        "dataset/img1.jpg",
        "dataset/img2.jpg",
        "dataset/img6.jpg",
        "dataset/img6.jpg",
    ]

    for img_path in img_paths:
        for detector in detectors:
            results = DeepFace.analyze(
                img_path, actions=("gender",), detector_backend=detector, enforce_detection=False
            )
            for result in results:
                logger.debug(result)

                # validate keys
                assert "gender" in result.keys()
                assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
                    "Man",
                    "Woman",
                ]

                # validate probabilities
                if result["dominant_gender"] == "Man":
                    assert result["gender"]["Man"] > result["gender"]["Woman"]
                else:
                    assert result["gender"]["Man"] < result["gender"]["Woman"]

tests/test_enforce_detection.py (new file, 46 lines)

@@ -0,0 +1,46 @@

import pytest
import numpy as np

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_enforce_detection.py")


def test_enabled_enforce_detection_for_non_facial_input():
    black_img = np.zeros([224, 224, 3])

    with pytest.raises(ValueError, match="Face could not be detected."):
        DeepFace.represent(img_path=black_img)

    with pytest.raises(ValueError, match="Face could not be detected."):
        DeepFace.verify(img1_path=black_img, img2_path=black_img)

    logger.info("✅ enabled enforce detection with non facial input tests done")


def test_disabled_enforce_detection_for_non_facial_input_on_represent():
    black_img = np.zeros([224, 224, 3])

    objs = DeepFace.represent(img_path=black_img, enforce_detection=False)

    assert isinstance(objs, list)
    assert len(objs) > 0
    assert isinstance(objs[0], dict)
    assert "embedding" in objs[0].keys()
    assert "facial_area" in objs[0].keys()
    assert isinstance(objs[0]["facial_area"], dict)
    assert "x" in objs[0]["facial_area"].keys()
    assert "y" in objs[0]["facial_area"].keys()
    assert "w" in objs[0]["facial_area"].keys()
    assert "h" in objs[0]["facial_area"].keys()
    assert isinstance(objs[0]["embedding"], list)
    assert len(objs[0]["embedding"]) == 2622  # embedding of VGG-Face

    logger.info("✅ disabled enforce detection with non facial input test for represent tests done")


def test_disabled_enforce_detection_for_non_facial_input_on_verify():
    black_img = np.zeros([224, 224, 3])
    obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
    assert isinstance(obj, dict)
    logger.info("✅ disabled enforce detection with non facial input test for verify tests done")

tests/test_extract_faces.py (new file, 24 lines)

@@ -0,0 +1,24 @@

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_extract_faces.py")


def test_different_detectors():
    detectors = ["opencv", "mtcnn"]

    for detector in detectors:
        img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
        for img_obj in img_objs:
            assert "face" in img_obj.keys()
            assert "facial_area" in img_obj.keys()
            assert isinstance(img_obj["facial_area"], dict)
            assert "x" in img_obj["facial_area"].keys()
            assert "y" in img_obj["facial_area"].keys()
            assert "w" in img_obj["facial_area"].keys()
            assert "h" in img_obj["facial_area"].keys()
            assert "confidence" in img_obj.keys()

            img = img_obj["face"]
            assert img.shape[0] > 0 and img.shape[1] > 0
        logger.info(f"✅ extract_faces for {detector} backend test is done")

tests/test_find.py (new file, 26 lines)

@@ -0,0 +1,26 @@

import cv2
import pandas as pd

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_find.py")


def test_find_with_exact_path():
    dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset", silent=True)
    for df in dfs:
        assert isinstance(df, pd.DataFrame)
        logger.debug(df.head())
        assert df.shape[0] > 0
    logger.info("✅ test find for exact path done")


def test_find_with_array_input():
    img1 = cv2.imread("dataset/img1.jpg")
    dfs = DeepFace.find(img1, db_path="dataset", silent=True)
    for df in dfs:
        logger.debug(df.head())
        assert df.shape[0] > 0
    logger.info("✅ test find for array input done")

tests/test_represent.py (new file, 30 lines)

@@ -0,0 +1,30 @@

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_represent.py")


def test_standard_represent():
    img_path = "dataset/img1.jpg"
    embedding_objs = DeepFace.represent(img_path)
    for embedding_obj in embedding_objs:
        embedding = embedding_obj["embedding"]
        logger.info(f"Function returned {len(embedding)} dimensional vector")
        assert len(embedding) == 2622
    logger.info("✅ test standard represent function done")


def test_represent_for_skipped_detector_backend():
    face_img = "dataset/img5.jpg"
    img_objs = DeepFace.represent(img_path=face_img, detector_backend="skip")
    assert len(img_objs) >= 1
    img_obj = img_objs[0]
    assert "embedding" in img_obj.keys()
    assert "facial_area" in img_obj.keys()
    assert isinstance(img_obj["facial_area"], dict)
    assert "x" in img_obj["facial_area"].keys()
    assert "y" in img_obj["facial_area"].keys()
    assert "w" in img_obj["facial_area"].keys()
    assert "h" in img_obj["facial_area"].keys()
    assert "face_confidence" in img_obj.keys()
    logger.info("✅ test represent function for skipped detector backend done")

tests/test_verify.py (new file, 102 lines)

@@ -0,0 +1,102 @@

import cv2

from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_facial_recognition_models.py")

models = ["VGG-Face", "Facenet", "Facenet512", "ArcFace"]
metrics = ["cosine", "euclidean", "euclidean_l2"]
detectors = ["opencv", "mtcnn"]


def test_different_facial_recognition_models():
    dataset = [
        ["dataset/img1.jpg", "dataset/img2.jpg", True],
        ["dataset/img5.jpg", "dataset/img6.jpg", True],
        ["dataset/img6.jpg", "dataset/img7.jpg", True],
        ["dataset/img8.jpg", "dataset/img9.jpg", True],
        ["dataset/img1.jpg", "dataset/img11.jpg", True],
        ["dataset/img2.jpg", "dataset/img11.jpg", True],
        ["dataset/img1.jpg", "dataset/img3.jpg", False],
        ["dataset/img2.jpg", "dataset/img3.jpg", False],
        ["dataset/img6.jpg", "dataset/img8.jpg", False],
        ["dataset/img6.jpg", "dataset/img9.jpg", False],
    ]

    expected_coverage = 97.53  # human level accuracy on LFW
    successful_tests = 0
    unsuccessful_tests = 0

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                if prediction is result:
                    test_result_label = "✅"
                    successful_tests += 1
                else:
                    test_result_label = "❌"
                    unsuccessful_tests += 1

                if prediction is True:
                    classified_label = "same person"
                else:
                    classified_label = "different persons"

                img1_alias = img1.split("/", maxsplit=1)[-1]
                img2_alias = img2.split("/", maxsplit=1)[-1]

                logger.debug(
                    f"{test_result_label} Pair {img1_alias}-{img2_alias}"
                    f" is {classified_label} based on {model}-{metric}"
                    f" (Distance: {distance}, Threshold: {threshold})",
                )

    coverage_score = (100 * successful_tests) / (successful_tests + unsuccessful_tests)
    assert (
        coverage_score > expected_coverage
    ), f"⛔ facial recognition models test failed with {coverage_score} score"
    logger.info(f"✅ facial recognition models test passed with {coverage_score}")


def test_different_face_detectors():
    for detector in detectors:
        res = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", detector_backend=detector)

        assert isinstance(res, dict)
        assert "verified" in res.keys()
        assert res["verified"] in [True, False]
        assert "distance" in res.keys()
        assert "threshold" in res.keys()
        assert "model" in res.keys()
        assert "detector_backend" in res.keys()
        assert "similarity_metric" in res.keys()
        assert "facial_areas" in res.keys()
        assert "img1" in res["facial_areas"].keys()
        assert "img2" in res["facial_areas"].keys()
        assert "x" in res["facial_areas"]["img1"].keys()
        assert "y" in res["facial_areas"]["img1"].keys()
        assert "w" in res["facial_areas"]["img1"].keys()
        assert "h" in res["facial_areas"]["img1"].keys()
        assert "x" in res["facial_areas"]["img2"].keys()
        assert "y" in res["facial_areas"]["img2"].keys()
        assert "w" in res["facial_areas"]["img2"].keys()
        assert "h" in res["facial_areas"]["img2"].keys()

        logger.info(f"✅ test verify for {detector} backend done")


def test_verify_for_preloaded_image():
    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")
    res = DeepFace.verify(img1, img2)
    assert res["verified"] is True
    logger.info("✅ test verify for pre-loaded image done")

tests/unit_tests.py (deleted file, 391 lines)

@@ -1,391 +0,0 @@

import warnings
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger()

# pylint: disable=consider-iterating-dictionary,broad-except

logger.info("-----------------------------------------")

warnings.filterwarnings("ignore")

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

tf_major_version = int(tf.__version__.split(".", maxsplit=1)[0])

if tf_major_version == 2:
    import logging

    tf.get_logger().setLevel(logging.ERROR)

logger.info("Running unit tests for TF " + tf.__version__)
logger.info("-----------------------------------------")

expected_coverage = 97
num_cases = 0
succeed_cases = 0


def evaluate(condition):
    global num_cases, succeed_cases
    if condition == True:
        succeed_cases += 1
    num_cases += 1


# ------------------------------------------------

detectors = ["opencv", "mtcnn"]
models = ["VGG-Face", "Facenet", "Facenet512", "ArcFace"]
metrics = ["cosine", "euclidean", "euclidean_l2"]

dataset = [
    ["dataset/img1.jpg", "dataset/img2.jpg", True],
    ["dataset/img5.jpg", "dataset/img6.jpg", True],
    ["dataset/img6.jpg", "dataset/img7.jpg", True],
    ["dataset/img8.jpg", "dataset/img9.jpg", True],
    ["dataset/img1.jpg", "dataset/img11.jpg", True],
    ["dataset/img2.jpg", "dataset/img11.jpg", True],
    ["dataset/img1.jpg", "dataset/img3.jpg", False],
    ["dataset/img2.jpg", "dataset/img3.jpg", False],
    ["dataset/img6.jpg", "dataset/img8.jpg", False],
    ["dataset/img6.jpg", "dataset/img9.jpg", False],
]

logger.info("-----------------------------------------")


def test_cases():
    logger.info("Enforce detection test")

    black_img = np.zeros([224, 224, 3])

    # enforce detection on for represent
    try:
        DeepFace.represent(img_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------
    # enforce detection off for represent
    try:
        objs = DeepFace.represent(img_path=black_img, enforce_detection=False)
        exception_thrown = False

        # validate response of represent function
        assert isinstance(objs, list)
        assert len(objs) > 0
        assert isinstance(objs[0], dict)
        assert "embedding" in objs[0].keys()
        assert "facial_area" in objs[0].keys()
        assert isinstance(objs[0]["facial_area"], dict)
        assert "x" in objs[0]["facial_area"].keys()
        assert "y" in objs[0]["facial_area"].keys()
        assert "w" in objs[0]["facial_area"].keys()
        assert "h" in objs[0]["facial_area"].keys()
        assert isinstance(objs[0]["embedding"], list)
        assert len(objs[0]["embedding"]) == 2622  # embedding of VGG-Face
    except Exception as err:
        logger.error(f"Unexpected exception thrown: {str(err)}")
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------
    # enforce detection on for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------
    # enforce detection off for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
        assert isinstance(obj, dict)
        exception_thrown = False
    except Exception as err:
        logger.error(f"Unexpected exception thrown: {str(err)}")
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------
    # Test represent on user-given image (skip detector)
    try:
        face_img = dataset[1][0]  # It's a face
        img_objs = DeepFace.represent(img_path=face_img, detector_backend="skip")
        assert len(img_objs) == 1
        img_obj = img_objs[0]
        assert "embedding" in img_obj.keys()
        assert "facial_area" in img_obj.keys()
        assert isinstance(img_obj["facial_area"], dict)
        assert "x" in img_obj["facial_area"].keys()
        assert "y" in img_obj["facial_area"].keys()
        assert "w" in img_obj["facial_area"].keys()
        assert "h" in img_obj["facial_area"].keys()
        assert "face_confidence" in img_obj.keys()
        exception_thrown = False
    except Exception as e:
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------

    logger.info("-----------------------------------------")
    logger.info("Extract faces test")

    for detector in detectors:
        img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
        for img_obj in img_objs:
            assert "face" in img_obj.keys()
            assert "facial_area" in img_obj.keys()
            assert isinstance(img_obj["facial_area"], dict)
            assert "x" in img_obj["facial_area"].keys()
            assert "y" in img_obj["facial_area"].keys()
            assert "w" in img_obj["facial_area"].keys()
            assert "h" in img_obj["facial_area"].keys()
            assert "confidence" in img_obj.keys()

            img = img_obj["face"]
            evaluate(img.shape[0] > 0 and img.shape[1] > 0)
        logger.info(f"{detector} test is done")

    logger.info("-----------------------------------------")

    img_path = "dataset/img1.jpg"
    embedding_objs = DeepFace.represent(img_path)
    for embedding_obj in embedding_objs:
        embedding = embedding_obj["embedding"]
        logger.info(f"Function returned {len(embedding)} dimensional vector")
        evaluate(len(embedding) == 2622)

    logger.info("-----------------------------------------")
    logger.info("Different face detectors on verification test")

    for detector in detectors:
        logger.info(detector + " detector")
        res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend=detector)

        assert isinstance(res, dict)
        assert "verified" in res.keys()
        assert res["verified"] in [True, False]
        assert "distance" in res.keys()
        assert "threshold" in res.keys()
        assert "model" in res.keys()
        assert "detector_backend" in res.keys()
        assert "similarity_metric" in res.keys()
        assert "facial_areas" in res.keys()
        assert "img1" in res["facial_areas"].keys()
        assert "img2" in res["facial_areas"].keys()
        assert "x" in res["facial_areas"]["img1"].keys()
        assert "y" in res["facial_areas"]["img1"].keys()
        assert "w" in res["facial_areas"]["img1"].keys()
        assert "h" in res["facial_areas"]["img1"].keys()
        assert "x" in res["facial_areas"]["img2"].keys()
        assert "y" in res["facial_areas"]["img2"].keys()
        assert "w" in res["facial_areas"]["img2"].keys()
        assert "h" in res["facial_areas"]["img2"].keys()

        logger.info(res)
        evaluate(res["verified"] == dataset[0][2])

    logger.info("-----------------------------------------")
    logger.info("Find function test")

    dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
    for df in dfs:
        assert isinstance(df, pd.DataFrame)
        logger.info(df.head())
        evaluate(df.shape[0] > 0)

    logger.info("-----------------------------------------")
    logger.info("Facial analysis test. Passing nothing as an action")

    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img)
    for demography in demography_objs:
        logger.info(demography)
        evaluate(demography["age"] > 20 and demography["age"] < 40)
        evaluate(demography["dominant_gender"] == "Woman")

    logger.info("-----------------------------------------")
    logger.info("Facial analysis test. Passing all to the action")

    demography_objs = DeepFace.analyze(img, ["age", "gender", "race", "emotion"])
    for demography in demography_objs:
        logger.debug(f"Demography: {demography}")
        # check response is a valid json
        age = demography["age"]
        gender = demography["dominant_gender"]
        race = demography["dominant_race"]
        emotion = demography["dominant_emotion"]
        logger.info(f"Age: {age}")
        logger.info(f"Gender: {gender}")
        logger.info(f"Race: {race}")
        logger.info(f"Emotion: {emotion}")

        evaluate(demography.get("age") is not None)
        evaluate(demography.get("dominant_gender") is not None)
        evaluate(demography.get("dominant_race") is not None)
        evaluate(demography.get("dominant_emotion") is not None)

    logger.info("-----------------------------------------")
    logger.info("Facial analysis test 2. Remove some actions and check they are not computed")

    demography_objs = DeepFace.analyze(img, ["age", "gender"])
    for demography in demography_objs:
        age = demography["age"]
        gender = demography["dominant_gender"]

        logger.info(f"Age: { age }")
        logger.info(f"Gender: {gender}")

        evaluate(demography.get("age") is not None)
        evaluate(demography.get("dominant_gender") is not None)
        evaluate(demography.get("dominant_race") is None)
        evaluate(demography.get("dominant_emotion") is None)

    logger.info("-----------------------------------------")
    logger.info("Facial recognition tests")

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                passed = prediction == result
                evaluate(passed)

                if passed:
                    test_result_label = "passed"
                else:
                    test_result_label = "failed"

                if prediction == True:
                    classified_label = "verified"
                else:
                    classified_label = "unverified"

                img1_alias = img1.split("/", maxsplit=1)[-1]
                img2_alias = img2.split("/", maxsplit=1)[-1]

                logger.info(
                    f"{img1_alias} - {img2_alias}"
                    f". {classified_label} as same person based on {model} and {metric}"
                    f". Distance: {distance}, Threshold:{threshold} ({test_result_label})",
                )

    logger.info("--------------------------")

    # -----------------------------------------

    logger.info("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
    resp_objs = DeepFace.analyze(img)
    for resp_obj in resp_objs:
        logger.info(resp_obj)
        evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
        evaluate(resp_obj["gender"] == "Woman")

    logger.info("--------------------------")
    logger.info("Passing numpy array to verify function")

    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")

    res = DeepFace.verify(img1, img2)
    logger.info(res)
    evaluate(res["verified"] == True)

    logger.info("--------------------------")
    logger.info("Passing numpy array to find function")

    img1 = cv2.imread("dataset/img1.jpg")
    dfs = DeepFace.find(img1, db_path="dataset")
    for df in dfs:
        logger.info(df.head())
        evaluate(df.shape[0] > 0)

    logger.info("--------------------------")
    logger.info("non-binary gender tests")

    # interface validation - no need to call evaluate here

    for img1_path, _, _ in dataset:
        for detector in detectors:
            results = DeepFace.analyze(
                img1_path, actions=("gender",), detector_backend=detector, enforce_detection=False
            )
            for result in results:
                logger.info(result)

                assert "gender" in result.keys()
                assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
                    "Man",
                    "Woman",
                ]

                if result["dominant_gender"] == "Man":
                    assert result["gender"]["Man"] > result["gender"]["Woman"]
                else:
                    assert result["gender"]["Man"] < result["gender"]["Woman"]


# ---------------------------------------------

test_cases()

logger.info("num of test cases run: " + str(num_cases))
logger.info("succeeded test cases: " + str(succeed_cases))

test_score = (100 * succeed_cases) / num_cases

logger.info("test coverage: " + str(test_score))

if test_score > expected_coverage:
    logger.info("well done! min required test coverage is satisfied")
else:
    logger.info("min required test coverage is NOT satisfied")

assert test_score > expected_coverage