Mirror of https://github.com/serengil/deepface.git

Commit: 5d767e2d49 ("code style")
Parent: 48d1c90589
@@ -418,25 +418,21 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =

    resp_obj["age"] = int(apparent_age) #int cast is for the exception - object of type 'float32' is not JSON serializable

elif action == 'gender':
    try:
        if img_224 is None:
            img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)

        gender_predictions = models['gender'].predict(img_224)[0,:]

        if img_224 is None:
            img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)

        gender_labels = ["Woman", "Man"]
        resp_obj["gender"] = {}
        gender_predictions = models['gender'].predict(img_224)[0,:]

        for i in range(0, len(gender_labels)):
            gender_label = gender_labels[i]
            gender_prediction = 100 * gender_predictions[i]
            resp_obj["gender"][gender_label] = gender_prediction

        gender_labels = ["Woman", "Man"]
        resp_obj["gender"] = {}

        for i, gender_label in enumerate(gender_labels):
            gender_prediction = 100 * gender_predictions[i]
            resp_obj["gender"][gender_label] = gender_prediction

        resp_obj["dominant_gender"] = gender_labels[np.argmax(gender_predictions)]

        resp_obj["dominant_gender"] = gender_labels[np.argmax(gender_predictions)]

    except Exception as e:
        resp_obj["dominant_gender"] = None
        resp_obj["gender"] = None
        resp_obj["error"] = e

elif action == 'race':
    if img_224 is None:
        img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True) #just emotion model expects grayscale images
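The hunk above appears to swap the index-based loop over gender_labels for enumerate and to move the predict() call ahead of the response-building code. A minimal standalone sketch of that response-building pattern; the prediction vector here is made up rather than coming from models['gender'].predict:

    import numpy as np

    # Stand-in for models['gender'].predict(img_224)[0, :]: a made-up two-class output
    gender_predictions = np.array([0.75, 0.25])
    gender_labels = ["Woman", "Man"]

    resp_obj = {"gender": {}}

    # Same pattern as the refactored loop in the hunk above
    for i, gender_label in enumerate(gender_labels):
        resp_obj["gender"][gender_label] = 100 * gender_predictions[i]

    resp_obj["dominant_gender"] = gender_labels[np.argmax(gender_predictions)]

    print(resp_obj)  # e.g. {'gender': {'Woman': 75.0, 'Man': 25.0}, 'dominant_gender': 'Woman'}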
@@ -1,35 +0,0 @@

from deepface import DeepFace

dataset = [
    'dataset/img1.jpg',
    'dataset/img5.jpg',
    'dataset/img6.jpg',
    'dataset/img7.jpg',
    'dataset/img9.jpg',
    'dataset/img11.jpg',
    'dataset/img11.jpg',
]


def test_gender_prediction():
    detectors = ['opencv', 'ssd', 'retinaface', 'mtcnn']  # dlib not tested
    for detector in detectors:
        test_gender_prediction_with_detector(detector)


def test_gender_prediction_with_detector(detector):
    results = DeepFace.analyze(dataset, actions=('gender',), detector_backend=detector, prog_bar=False,
                               enforce_detection=False)
    for result in results:
        assert 'gender' in result.keys()
        assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
        if result["dominant_gender"] == "Man":
            assert result["gender"]["Man"] > result["gender"]["Woman"]
        else:
            assert result["gender"]["Man"] < result["gender"]["Woman"]
    print(f'detector {detector} passed')
    return True


if __name__ == "__main__":
    test_gender_prediction()
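The hunk above removes this test module entirely (its functions are also dropped from the unit_tests import below); it drives the detector loop by hand and defines its own __main__ entry point. A hedged alternative sketch of the same checks expressed with pytest parametrization; pytest itself is an assumption, while the DeepFace.analyze call, detector list, and image paths are taken from the deleted test:

    import pytest
    from deepface import DeepFace

    # Same backends and dataset paths as the deleted test above (dlib not tested there either)
    detectors = ['opencv', 'ssd', 'retinaface', 'mtcnn']
    dataset = ['dataset/img1.jpg', 'dataset/img5.jpg', 'dataset/img6.jpg',
               'dataset/img7.jpg', 'dataset/img9.jpg', 'dataset/img11.jpg']

    @pytest.mark.parametrize("detector", detectors)
    def test_gender_prediction(detector):
        # Same call and result handling as the deleted test
        results = DeepFace.analyze(dataset, actions=('gender',), detector_backend=detector,
                                   prog_bar=False, enforce_detection=False)
        for result in results:
            assert 'gender' in result.keys()
            assert result["dominant_gender"] in ["Man", "Woman"]
            if result["dominant_gender"] == "Man":
                assert result["gender"]["Man"] > result["gender"]["Woman"]
            else:
                assert result["gender"]["Man"] < result["gender"]["Woman"]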
@@ -3,7 +3,6 @@ import os

import tensorflow as tf
import cv2
from deepface import DeepFace
from tests.test_nonbinary_gender import test_gender_prediction, test_gender_prediction_with_detector

print("-----------------------------------------")
@@ -207,14 +206,27 @@ def test_cases():

print("--------------------------")

print("non-binary gender tests")

def run_gender_prediction_test():
for detector in detectors:
    evaluate(test_gender_prediction_with_detector(detector))
    #interface validation - no need to call evaluate here

    for img1_path, img2_path, verified in dataset:
        for detector in detectors:
            result = DeepFace.analyze(img1_path, actions=('gender',), detector_backend=detector, enforce_detection=False)

            print(result)

            assert 'gender' in result.keys()
            assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]

            if result["dominant_gender"] == "Man":
                assert result["gender"]["Man"] > result["gender"]["Woman"]
            else:
                assert result["gender"]["Man"] < result["gender"]["Woman"]

# ---------------------------------------------

test_cases()
run_gender_prediction_test()

print("num of test cases run: " + str(num_cases))
print("succeeded test cases: " + str(succeed_cases))
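The assertion block inside run_gender_prediction_test mirrors the one in the deleted test module. A hypothetical helper (not part of this commit) that both call sites could share, built only from the assertions visible above:

    def assert_gender_result(result):
        # Shared checks from the deleted test module and run_gender_prediction_test
        assert 'gender' in result.keys()
        assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
        if result["dominant_gender"] == "Man":
            assert result["gender"]["Man"] > result["gender"]["Woman"]
        else:
            assert result["gender"]["Man"] < result["gender"]["Woman"]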