Mirror of https://github.com/serengil/deepface.git (synced 2025-07-29 14:21:39 +00:00)
Let preprocess_face always return region
This makes the function easier to use, and lets us refactor later to a list of regions.
parent 61e6de4048
commit 5adb041e19
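In short, functions.preprocess_face now always returns a (face_pixels, region) tuple instead of gating the region behind a return_region flag. A minimal before/after sketch of the call-site change (the image path is a hypothetical placeholder):

from deepface.commons import functions

# before this commit: region was only returned when explicitly requested
img, region = functions.preprocess_face(img = "face.jpg", target_size = (48, 48),
    grayscale = True, return_region = True)

# after this commit: the tuple is unconditional; unpack with `_` to discard region
img, region = functions.preprocess_face(img = "face.jpg", target_size = (48, 48),
    grayscale = True)
img_only, _ = functions.preprocess_face(img = "face.jpg", target_size = (224, 224))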
@@ -384,7 +384,7 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race'] , models =

         if action == 'emotion':
             emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
-            img, region = functions.preprocess_face(img = img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+            img, region = functions.preprocess_face(img = img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection, detector_backend = detector_backend)

             emotion_predictions = models['emotion'].predict(img)[0,:]
@@ -401,7 +401,7 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race'] , models =

         elif action == 'age':
             if img_224 is None:
-                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend)

             age_predictions = models['age'].predict(img_224)[0,:]
             apparent_age = Age.findApparentAge(age_predictions)
@@ -410,7 +410,7 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race'] , models =

         elif action == 'gender':
             if img_224 is None:
-                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
+                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend)

             gender_prediction = models['gender'].predict(img_224)[0,:]
@@ -423,7 +423,7 @@ def analyze(img_path, actions = ['emotion', 'age', 'gender', 'race'] , models =

         elif action == 'race':
             if img_224 is None:
-                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True) #just emotion model expects grayscale images
+                img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend) #just emotion model expects grayscale images
             race_predictions = models['race'].predict(img_224)[0,:]
             race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
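Each analyze action above now receives the detected face region unconditionally, so analyze can report it back to the caller. A hedged usage sketch; the "region" response key and its contents are assumptions about the surrounding code, not shown in this diff:

from deepface import DeepFace

demography = DeepFace.analyze(img_path = "face.jpg", actions = ['emotion', 'age'])
# assumed: the response carries the detected face box alongside the predictions
print(demography.get("region"))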
@@ -745,7 +745,7 @@ def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection
     input_shape_x, input_shape_y = functions.find_input_shape(model)

     #detect and align
-    img = functions.preprocess_face(img = img_path
+    img, _ = functions.preprocess_face(img = img_path
         , target_size=(input_shape_y, input_shape_x)
         , enforce_detection = enforce_detection
         , detector_backend = detector_backend
@@ -810,8 +810,8 @@ def detectFace(img_path, detector_backend = 'opencv', enforce_detection = True,
         deteced and aligned face in numpy format
     """

-    img = functions.preprocess_face(img = img_path, detector_backend = detector_backend
-        , enforce_detection = enforce_detection, align = align)[0] #preprocess_face returns (1, 224, 224, 3)
+    img, _ = functions.preprocess_face(img = img_path, detector_backend = detector_backend
+        , enforce_detection = enforce_detection, align = align) #preprocess_face returns (1, 224, 224, 3)

     return img[:, :, ::-1] #bgr to rgb

 #---------------------------
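detectFace keeps its public contract by unpacking the tuple itself and discarding the region, so existing callers are untouched. A small usage sketch (image path hypothetical):

from deepface import DeepFace

face = DeepFace.detectFace(img_path = "face.jpg", detector_backend = 'opencv',
    enforce_detection = True, align = True)
print(face.shape)  # numpy array, RGB after the [:, :, ::-1] flip above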
@@ -162,7 +162,7 @@ def normalize_input(img, normalization = 'base'):

     return img

-def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False, align = True):
+def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', align = True):

     #img might be path, base64 or numpy array. Convert it to numpy whatever it is.
     img = load_image(img)
@@ -219,10 +219,7 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete

     #---------------------------------------------------

-    if return_region == True:
-        return img_pixels, region
-    else:
-        return img_pixels
+    return img_pixels, region

 def find_input_shape(model):
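This hunk is the heart of the change: the conditional return collapses into a single unconditional tuple. A minimal sketch of the new contract, assuming the [x, y, w, h] region layout the detector backends produce (image path hypothetical):

from deepface.commons import functions

img_pixels, region = functions.preprocess_face(img = "face.jpg",
    target_size = (224, 224), detector_backend = 'opencv')
print(img_pixels.shape)  # (1, 224, 224, 3) per the comment in detectFace
x, y, w, h = region      # assumed [x, y, w, h] layout of the detected box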
@@ -96,7 +96,7 @@ def analysis(db_path, model_name = 'VGG-Face', detector_backend = 'opencv', dist
         embedding = []

         #preprocess_face returns single face. this is expected for source images in db.
-        img = functions.preprocess_face(img = employee, target_size = (input_shape_y, input_shape_x), enforce_detection = False, detector_backend = detector_backend)
+        img, _ = functions.preprocess_face(img = employee, target_size = (input_shape_y, input_shape_x), enforce_detection = False, detector_backend = detector_backend)
         img_representation = model.predict(img)[0,:]

         embedding.append(employee)
@@ -202,7 +202,7 @@ def analysis(db_path, model_name = 'VGG-Face', detector_backend = 'opencv', dist

                 if enable_face_analysis == True:

-                    gray_img = functions.preprocess_face(img = custom_face, target_size = (48, 48), grayscale = True, enforce_detection = False, detector_backend = 'opencv')
+                    gray_img, _ = functions.preprocess_face(img = custom_face, target_size = (48, 48), grayscale = True, enforce_detection = False, detector_backend = 'opencv')
                     emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
                     emotion_predictions = emotion_model.predict(gray_img)[0,:]
                     sum_of_predictions = emotion_predictions.sum()
@@ -280,7 +280,7 @@ def analysis(db_path, model_name = 'VGG-Face', detector_backend = 'opencv', dist

                     #-------------------------------

-                    face_224 = functions.preprocess_face(img = custom_face, target_size = (224, 224), grayscale = False, enforce_detection = False, detector_backend = 'opencv')
+                    face_224, _ = functions.preprocess_face(img = custom_face, target_size = (224, 224), grayscale = False, enforce_detection = False, detector_backend = 'opencv')

                     age_predictions = age_model.predict(face_224)[0,:]
                     apparent_age = Age.findApparentAge(age_predictions)
@@ -335,7 +335,7 @@ def analysis(db_path, model_name = 'VGG-Face', detector_backend = 'opencv', dist
                 #-------------------------------
                 #face recognition

-                custom_face = functions.preprocess_face(img = custom_face, target_size = (input_shape_y, input_shape_x), enforce_detection = False, detector_backend = 'opencv')
+                custom_face, _ = functions.preprocess_face(img = custom_face, target_size = (input_shape_y, input_shape_x), enforce_detection = False, detector_backend = 'opencv')

                 #check preprocess_face function handled
                 if custom_face.shape[1:3] == input_shape:
@@ -22,11 +22,11 @@ print("model output shape: ", model.layers[-1].input_shape[-1])

 #load images and find embeddings

 #img1 = functions.detectFace("dataset/img1.jpg", input_shape)
-img1 = functions.preprocess_face("dataset/img1.jpg", input_shape)
+img1, _ = functions.preprocess_face("dataset/img1.jpg", input_shape)
 img1_representation = model.predict(img1)[0,:]

 #img2 = functions.detectFace("dataset/img3.jpg", input_shape)
-img2 = functions.preprocess_face("dataset/img3.jpg", input_shape)
+img2, _ = functions.preprocess_face("dataset/img3.jpg", input_shape)
 img2_representation = model.predict(img2)[0,:]

 #----------------------------------------------