Mirror of https://github.com/serengil/deepface.git
Merge pull request #497 from shirasael/feature/non-binary-gender-result
Feature/non binary gender result

commit 48d1c90589
deepface/DeepFace.py
@@ -294,7 +294,11 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 	{
 		"region": {'x': 230, 'y': 120, 'w': 36, 'h': 45},
 		"age": 28.66,
-		"gender": "woman",
+		"dominant_gender": "Woman",
+		"gender": {
+			'Woman': 99.99407529830933,
+			'Man': 0.005928758764639497,
+		}
 		"dominant_emotion": "neutral",
 		"emotion": {
 			'sad': 37.65260875225067,
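For reference, a minimal sketch (not part of this diff) of how a caller would read the reshaped result; the image path is a placeholder:

    from deepface import DeepFace

    # single-image call as in the docstring above; "img.jpg" is a placeholder path
    obj = DeepFace.analyze("img.jpg", actions=('gender',))

    # "gender" is now a dict of per-class confidences (percentages), and
    # "dominant_gender" carries the label with the highest confidence
    print(obj["dominant_gender"])   # e.g. "Woman"
    print(obj["gender"]["Woman"])   # e.g. 99.99
    print(obj["gender"]["Man"])     # e.g. 0.006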
@@ -414,18 +418,25 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 			resp_obj["age"] = int(apparent_age) #int cast is for the exception - object of type 'float32' is not JSON serializable

 		elif action == 'gender':
+			try:
 				if img_224 is None:
 					img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)

-				gender_prediction = models['gender'].predict(img_224)[0,:]
+				gender_predictions = models['gender'].predict(img_224)[0,:]

-				if np.argmax(gender_prediction) == 0:
-					gender = "Woman"
-				elif np.argmax(gender_prediction) == 1:
-					gender = "Man"
+				gender_labels = ["Woman", "Man"]
+				resp_obj["gender"] = {}

-				resp_obj["gender"] = gender
+				for i in range(0, len(gender_labels)):
+					gender_label = gender_labels[i]
+					gender_prediction = 100 * gender_predictions[i]
+					resp_obj["gender"][gender_label] = gender_prediction
+
+				resp_obj["dominant_gender"] = gender_labels[np.argmax(gender_predictions)]
+			except Exception as e:
+				resp_obj["dominant_gender"] = None
+				resp_obj["gender"] = None
+				resp_obj["error"] = e
 		elif action == 'race':
 			if img_224 is None:
 				img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True) #just emotion model expects grayscale images
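Isolated from the loop above, the mapping from the model's two-class output to percentages and a dominant label works like this (a standalone sketch with a made-up prediction vector):

    import numpy as np

    gender_labels = ["Woman", "Man"]

    # hypothetical softmax output of the gender model for one face
    gender_predictions = np.array([0.9999408, 0.0000059])

    # percentage per label, plus the label with the highest score
    gender = {label: 100 * float(score) for label, score in zip(gender_labels, gender_predictions)}
    dominant_gender = gender_labels[int(np.argmax(gender_predictions))]

    print(gender)            # approx. {'Woman': 99.994, 'Man': 0.00059}
    print(dominant_gender)   # Woman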
@@ -444,7 +455,7 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =

 		#-----------------------------

-		if is_region_set != True:
+		if is_region_set != True and region:
 			resp_obj["region"] = {}
 			is_region_set = True
 			for i, parameter in enumerate(region_labels):
@@ -458,14 +469,8 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 		return resp_obj

 	if bulkProcess == True:
+		return resp_objects
-
-		resp_obj = {}
-
-		for i in range(0, len(resp_objects)):
-			resp_item = resp_objects[i]
-			resp_obj["instance_%d" % (i+1)] = resp_item
-
-		return resp_obj


 def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True, detector_backend = 'opencv', align = True, prog_bar = True, normalization = 'base', silent=False):
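Note the behavioural change in the bulk path: for a list of inputs, analyze now returns the resp_objects list directly instead of wrapping each entry under an "instance_N" key. A minimal usage sketch, with placeholder paths:

    from deepface import DeepFace

    # list input now yields a plain list of per-image result dicts
    results = DeepFace.analyze(["img1.jpg", "img2.jpg"], actions=('gender',))

    for result in results:
        print(result["dominant_gender"], result["gender"])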
deepface/commons/functions.py
@@ -83,7 +83,7 @@ def load_image(img):
 		img = loadBase64Img(img)

 	elif url_img:
-		img = np.array(Image.open(requests.get(img, stream=True).raw))
+		img = np.array(Image.open(requests.get(img, stream=True).raw).convert('RGB'))

 	elif exact_image != True: #image path passed as input
 		if os.path.isfile(img) != True:
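The added .convert('RGB') guards against images fetched over HTTP that PIL opens in RGBA or palette mode; without it the array can carry an extra alpha channel and break the 3-channel preprocessing further down. A small sketch of the idea, assuming a reachable image URL (the URL below is a placeholder):

    import numpy as np
    import requests
    from PIL import Image

    url = "https://example.com/face.png"   # placeholder URL
    raw = requests.get(url, stream=True).raw

    # force a 3-channel RGB array regardless of the source image mode (RGBA, P, L, ...)
    img = np.array(Image.open(raw).convert('RGB'))
    print(img.shape)   # (height, width, 3)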
deepface/extendedmodels/Gender.py
@@ -18,6 +18,7 @@ elif tf_version == 2:

 #url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'

+
 def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5'):

 	model = VGGFace.baseModel()
tests/test_nonbinary_gender.py (new file)
@@ -0,0 +1,35 @@
+from deepface import DeepFace
+
+dataset = [
+	'dataset/img1.jpg',
+	'dataset/img5.jpg',
+	'dataset/img6.jpg',
+	'dataset/img7.jpg',
+	'dataset/img9.jpg',
+	'dataset/img11.jpg',
+	'dataset/img11.jpg',
+]
+
+
+def test_gender_prediction():
+	detectors = ['opencv', 'ssd', 'retinaface', 'mtcnn']  # dlib not tested
+	for detector in detectors:
+		test_gender_prediction_with_detector(detector)
+
+
+def test_gender_prediction_with_detector(detector):
+	results = DeepFace.analyze(dataset, actions=('gender',), detector_backend=detector, prog_bar=False,
+	                           enforce_detection=False)
+	for result in results:
+		assert 'gender' in result.keys()
+		assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
+		if result["dominant_gender"] == "Man":
+			assert result["gender"]["Man"] > result["gender"]["Woman"]
+		else:
+			assert result["gender"]["Man"] < result["gender"]["Woman"]
+	print(f'detector {detector} passed')
+	return True
+
+
+if __name__ == "__main__":
+	test_gender_prediction()
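Since the new test module guards its entry point with __main__, the detector backends can also be exercised one at a time from a Python shell; a short sketch (assumes the dataset/ images from the repository are present in the working directory):

    from tests.test_nonbinary_gender import test_gender_prediction_with_detector

    # run the gender assertions for a single detector backend
    test_gender_prediction_with_detector('opencv')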
tests/unit_tests.py
@@ -3,6 +3,7 @@ import os
 import tensorflow as tf
 import cv2
 from deepface import DeepFace
+from tests.test_nonbinary_gender import test_gender_prediction, test_gender_prediction_with_detector

 print("-----------------------------------------")

@@ -96,7 +97,7 @@ def test_cases():
 	print(demography)

 	evaluate(demography["age"] > 20 and demography["age"] < 40)
-	evaluate(demography["gender"] == "Woman")
+	evaluate(demography["dominant_gender"] == "Woman")

 	print("-----------------------------------------")

@@ -108,12 +109,12 @@ def test_cases():

 	#check response is a valid json
 	print("Age: ", demography["age"])
-	print("Gender: ", demography["gender"])
+	print("Gender: ", demography["dominant_gender"])
 	print("Race: ", demography["dominant_race"])
 	print("Emotion: ", demography["dominant_emotion"])

 	evaluate(demography.get("age") is not None)
 	evaluate(demography.get("gender") is not None)
+	evaluate(demography.get("dominant_gender") is not None)
 	evaluate(demography.get("dominant_race") is not None)
 	evaluate(demography.get("dominant_emotion") is not None)

@@ -123,12 +124,12 @@ def test_cases():
 	demography = DeepFace.analyze(img, ['age', 'gender'])

 	print("Age: ", demography.get("age"))
-	print("Gender: ", demography.get("gender"))
+	print("Gender: ", demography.get("dominant_gender"))
 	print("Race: ", demography.get("dominant_race"))
 	print("Emotion: ", demography.get("dominant_emotion"))

 	evaluate(demography.get("age") is not None)
 	evaluate(demography.get("gender") is not None)
+	evaluate(demography.get("dominant_gender") is not None)
 	evaluate(demography.get("dominant_race") is None)
 	evaluate(demography.get("dominant_emotion") is None)

@@ -206,7 +207,14 @@ def test_cases():

 	print("--------------------------")

+
+def run_gender_prediction_test():
+	for detector in detectors:
+		evaluate(test_gender_prediction_with_detector(detector))
+
+
 test_cases()
+run_gender_prediction_test()

 print("num of test cases run: " + str(num_cases))
 print("succeeded test cases: " + str(succeed_cases))