Mirror of https://github.com/serengil/deepface.git, synced 2025-06-07 20:15:21 +00:00

Merge pull request #188 from vjsyong/master: Retrieve bounding box from analysis

commit 8af418d052

.gitignore (vendored)
@@ -14,4 +14,5 @@ deepface/commons/__pycache__/*
 deepface/basemodels/__pycache__/*
 deepface/extendedmodels/__pycache__/*
 deepface/subsidiarymodels/__pycache__/*
 tests/dataset/*.pkl
+Age_Gender_Retail_Analysis.ipynb
@@ -282,6 +282,12 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 	The function returns a dictionary. If img_path is a list, then it will return list of dictionary.
 
 	{
+		"region": {
+			'x': 230,
+			'y': 120,
+			'w': 36,
+			'h': 45
+		}
 		"age": 28.66,
 		"gender": "woman",
 		"dominant_emotion": "neutral",
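As a quick sketch of the new payload (illustrative only; "photo.jpg" is a placeholder path), the bounding box can be read straight off the dictionary that analyze() returns:

    from deepface import DeepFace

    # Illustrative call; the response shape follows the docstring above.
    result = DeepFace.analyze(img_path = "photo.jpg", actions = ['emotion', 'age', 'gender', 'race'])

    box = result["region"]  # e.g. {'x': 230, 'y': 120, 'w': 36, 'h': 45}
    print("face at (%d, %d), size %dx%d" % (box['x'], box['y'], box['w'], box['h']))
    print("age:", result["age"], "| dominant emotion:", result["dominant_emotion"])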
@@ -362,6 +368,10 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 	pbar = tqdm(range(0,len(actions)), desc='Finding actions', disable = disable_option)
 
 	img_224 = None # Set to prevent re-detection
+
+	region = [] # x, y, w, h of the detected face region
+
+	region_labels = ['x', 'y', 'w', 'h']
 
 	#facial attribute analysis
 	for index in pbar:
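region_labels exists so the raw four-element list coming back from detect_face can be folded into the response dictionary; the per-action hunks below repeat this pattern. A self-contained sketch, using the example values from the docstring:

    # Standalone illustration, not part of the diff.
    region = [230, 120, 36, 45]           # [x, y, w, h] as returned by detect_face
    region_labels = ['x', 'y', 'w', 'h']

    resp_obj = {"region": {}}
    for i, parameter in enumerate(region_labels):
        resp_obj["region"][parameter] = region[i]

    # dict(zip(region_labels, region)) would build the same mapping in one step
    print(resp_obj)  # {'region': {'x': 230, 'y': 120, 'w': 36, 'h': 45}}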
@@ -370,7 +380,12 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 
 		if action == 'emotion':
 			emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
-			img = functions.preprocess_face(img = img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection, detector_backend = detector_backend)
+			img, region = functions.preprocess_face(img = img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+
+			resp_obj["region"] = {}
+
+			for i, parameter in enumerate(region_labels):
+				resp_obj["region"][parameter] = region[i]
 
 			emotion_predictions = models['emotion'].predict(img)[0,:]
 
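Only the emotion action feeds a 48x48 grayscale crop to its model; the other actions share the 224x224 RGB tensor cached in img_224. A rough sketch of the shapes involved, assuming Keras-style channels-last tensors as produced by preprocess_face:

    import numpy as np

    # Placeholder tensors; real ones come from functions.preprocess_face.
    img = np.zeros((1, 48, 48, 1), dtype = np.float32)        # emotion: grayscale, 48x48
    img_224 = np.zeros((1, 224, 224, 3), dtype = np.float32)  # age/gender/race: RGB, 224x224

    # models['emotion'].predict(img)[0,:] then yields one score per label in
    # ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']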
@@ -387,7 +402,13 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 
 		elif action == 'age':
 			if img_224 is None:
-				img_224 = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend)
+				img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+
+			resp_obj["region"] = {}
+
+			for i, parameter in enumerate(region_labels):
+				resp_obj["region"][parameter] = region[i]
 
 			age_predictions = models['age'].predict(img_224)[0,:]
 			apparent_age = Age.findApparentAge(age_predictions)
 
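Age.findApparentAge reduces the age model's multi-class output to a single number. A hypothetical stand-in showing the expected-value style reduction this kind of model typically uses (the real helper lives in the Age module; the 101-class assumption is mine):

    import numpy as np

    def find_apparent_age(age_predictions):
        # Hypothetical stand-in for Age.findApparentAge: treat the outputs
        # as P(age == i) for i in 0..100 and take the expected value.
        output_indexes = np.arange(0, 101)
        return np.sum(age_predictions * output_indexes)

    # A distribution peaked around 28 yields an apparent age such as 28.66.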
@@ -395,8 +416,13 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 
 		elif action == 'gender':
 			if img_224 is None:
-				img_224 = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend)
+				img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+
+			resp_obj["region"] = {}
+
+			for i, parameter in enumerate(region_labels):
+				resp_obj["region"][parameter] = region[i]
 
 			gender_prediction = models['gender'].predict(img_224)[0,:]
 
 			if np.argmax(gender_prediction) == 0:
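The gender branch ends in a two-way argmax, with index 0 mapping to the "woman" label shown in the docstring example. A minimal sketch, assuming index 1 is the complementary "man" class:

    import numpy as np

    # gender_prediction stands in for models['gender'].predict(img_224)[0,:]
    gender_prediction = np.array([0.93, 0.07])  # invented two-class scores

    if np.argmax(gender_prediction) == 0:
        gender = "Woman"  # label casing assumed; the docstring prints "woman"
    else:
        gender = "Man"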
@@ -408,10 +434,15 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race']
 
 		elif action == 'race':
 			if img_224 is None:
-				img_224 = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend) #just emotion model expects grayscale images
+				img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True) #just emotion model expects grayscale images
 			race_predictions = models['race'].predict(img_224)[0,:]
 			race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
+
+			resp_obj["region"] = {}
+
+			for i, parameter in enumerate(region_labels):
+				resp_obj["region"][parameter] = region[i]
 
 			sum_of_predictions = race_predictions.sum()
 
 			resp_obj["race"] = {}
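sum_of_predictions is used to report each race score as a percentage of the total. A standalone sketch of that normalization, with invented scores:

    import numpy as np

    race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
    race_predictions = np.array([0.05, 0.02, 0.01, 0.80, 0.07, 0.05])  # invented scores

    sum_of_predictions = race_predictions.sum()

    resp_obj = {"race": {}}
    for i, label in enumerate(race_labels):
        resp_obj["race"][label] = 100 * race_predictions[i] / sum_of_predictions

    dominant_race = race_labels[np.argmax(race_predictions)]  # 'white' here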
@@ -206,7 +206,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 		if len(faces) > 0:
 			x,y,w,h = faces[0] #focus on the 1st face found in the image
 			detected_face = img[int(y):int(y+h), int(x):int(x+w)]
-			return detected_face
+			return detected_face, [x, y, w, h]
 
 		else: #if no face detected
 
@@ -260,7 +260,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 
 			detected_face = base_img[int(top*aspect_ratio_y):int(bottom*aspect_ratio_y), int(left*aspect_ratio_x):int(right*aspect_ratio_x)]
 
-			return detected_face
+			return detected_face, [int(left*aspect_ratio_x), int(top*aspect_ratio_y), int(right*aspect_ratio_x) - int(left*aspect_ratio_x), int(bottom*aspect_ratio_y) - int(top*aspect_ratio_y)]
 
 		else: #if no face detected
 
@@ -283,7 +283,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 
 			detected_face = img[top:bottom, left:right]
 
-			return detected_face
+			return detected_face, [left, top, right - left, bottom - top]
 
 		else: #if no face detected
 
@@ -302,7 +302,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 			detection = detections[0]
 			x, y, w, h = detection["box"]
 			detected_face = img[int(y):int(y+h), int(x):int(x+w)]
-			return detected_face
+			return detected_face, [x, y, w, h]
 
 		else: #if no face detected
 			if not enforce_detection:
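With all four detector branches patched above, detect_face now returns a (face, region) pair on every success path, so callers can rely on a single unpacking pattern. A sketch, assuming the helpers live in deepface.commons.functions as they do in this repository, and "photo.jpg" is a placeholder image containing a face:

    import cv2
    from deepface.commons import functions

    img = cv2.imread("photo.jpg")  # placeholder path; a face is assumed present

    detected_face, region = functions.detect_face(img = img, detector_backend = 'opencv',
                                                  grayscale = False, enforce_detection = True)
    x, y, w, h = region  # pixel coordinates of the crop inside the original frame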
@@ -436,7 +436,7 @@ def align_face(img, detector_backend = 'opencv'):
 
 	return img #return img anyway
 
-def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv'):
+def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False):
 
 	#img_path = copy.copy(img)
 
@@ -444,7 +444,7 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete
 	img = load_image(img)
 	base_img = img.copy()
 
-	img = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection)
+	img, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection)
 
 	#--------------------------
 
@@ -468,7 +468,10 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete
 	img_pixels = np.expand_dims(img_pixels, axis = 0)
 	img_pixels /= 255 #normalize input in [0, 1]
 
-	return img_pixels
+	if return_region == True:
+		return img_pixels, region
+	else:
+		return img_pixels
 
 def find_input_shape(model):
 
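Because return_region defaults to False, existing call sites keep receiving a single tensor; only opted-in callers get the tuple. A short usage sketch under the same module assumption as above ("photo.jpg" is a placeholder path):

    from deepface.commons import functions

    # Old behaviour, unchanged: a single preprocessed tensor comes back.
    img_pixels = functions.preprocess_face(img = "photo.jpg", target_size = (224, 224))

    # Opt-in behaviour added here: the tensor plus the [x, y, w, h] region.
    img_pixels, region = functions.preprocess_face(img = "photo.jpg", target_size = (224, 224),
                                                   return_region = True)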