serengil/deepface (mirror of https://github.com/serengil/deepface.git)

issue 89 solved

commit 4f7c5273c9 (parent 414d58b508)
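In short: the internal helper functions.detectFace becomes functions.preprocess_face everywhere it is called (verify, analyze, find, the realtime analysis loop and the unit tests), the ensemble loop variable model becomes ensemble_model so it no longer reuses the name of find's own model variable, the lightgbm import moves inside the Ensemble branches, and the bulk tests are reordered. A minimal sketch of the renamed call as it appears in the hunks below; the image path is illustrative:

	from deepface.commons import functions

	# new name, same behavior: crop, align and resize a detected face
	# signature: preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True)
	face = functions.preprocess_face("dataset/img1.jpg", target_size = (224, 224))
	print(face.shape)  # preprocess_face returns (1, 224, 224, 3)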
@@ -105,8 +105,8 @@ def verify(img1_path, img2_path=''
 	input_shape = input_shape[1:3]
 
 
-	img1 = functions.detectFace(img1_path, input_shape, enforce_detection = enforce_detection)
-	img2 = functions.detectFace(img2_path, input_shape, enforce_detection = enforce_detection)
+	img1 = functions.preprocess_face(img1_path, input_shape, enforce_detection = enforce_detection)
+	img2 = functions.preprocess_face(img2_path, input_shape, enforce_detection = enforce_detection)
 
 	img1_representation = custom_model.predict(img1)[0,:]
 	img2_representation = custom_model.predict(img2)[0,:]

@@ -274,8 +274,8 @@ def verify(img1_path, img2_path=''
 	#----------------------
 	#crop and align faces
 
-	img1 = functions.detectFace(img1_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
-	img2 = functions.detectFace(img2_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+	img1 = functions.preprocess_face(img1_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+	img2 = functions.preprocess_face(img2_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
 
 	#----------------------
 	#find embeddings

@@ -422,7 +422,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 
 		if action == 'emotion':
 			emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
-			img = functions.detectFace(img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection)
+			img = functions.preprocess_face(img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection)
 
 			emotion_predictions = emotion_model.predict(img)[0,:]
 

@@ -445,7 +445,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 
 		elif action == 'age':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
 			#print("age prediction")
 			age_predictions = age_model.predict(img_224)[0,:]
 			apparent_age = Age.findApparentAge(age_predictions)

@@ -454,7 +454,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 
 		elif action == 'gender':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
 			#print("gender prediction")
 
 			gender_prediction = gender_model.predict(img_224)[0,:]

@@ -468,7 +468,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 
 		elif action == 'race':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
 			race_predictions = race_model.predict(img_224)[0,:]
 			race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
 

@@ -516,12 +516,15 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 
 
 def detectFace(img_path):
-	img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
+	img = functions.preprocess_face(img_path)[0] #preprocess_face returns (1, 224, 224, 3)
 	return img[:, :, ::-1] #bgr to rgb
 
 def find(img_path, db_path
 	, model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True):
 
+	model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
+	metric_names = ['cosine', 'euclidean', 'euclidean_l2']
+
 	tic = time.time()
 
 	if type(img_path) == list:
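The package-level DeepFace.detectFace wrapper above keeps its public name and now just delegates to the renamed helper, so callers of the package API are unaffected by the rename. A minimal usage sketch; the path is illustrative:

	from deepface import DeepFace

	img = DeepFace.detectFace("dataset/img1.jpg")  # public name unchanged
	# internally: functions.preprocess_face(img_path)[0], then img[:, :, ::-1] for BGR to RGB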
@@ -560,8 +563,6 @@ def find(img_path, db_path
 
 			import lightgbm as lgb #lightgbm==2.3.1
 
-			model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
-			metric_names = ['cosine', 'euclidean', 'euclidean_l2']
 			models = {}
 
 			pbar = tqdm(range(0, len(model_names)), desc='Face recognition models')

@@ -586,6 +587,8 @@ def find(img_path, db_path
 		print("Already built model is passed")
 
 	if model_name == 'Ensemble':
 
+		import lightgbm as lgb #lightgbm==2.3.1
+
 		#validate model dictionary because it might be passed from input as pre-trained
 
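Moving import lightgbm as lgb into the Ensemble branches keeps it a soft dependency: importing deepface stays cheap, and lightgbm==2.3.1 is only required when model_name == 'Ensemble' is actually used. A sketch of the same deferred-import pattern in isolation; the function name and model file are illustrative, not from this commit:

	def ensemble_predict(features):
		# deferred import: lightgbm is loaded (and required) only on this path
		import lightgbm as lgb  # lightgbm==2.3.1
		booster = lgb.Booster(model_file = "ensemble-model.txt")
		return booster.predict(features)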
@@ -597,6 +600,8 @@ def find(img_path, db_path
 				print("Ensemble learning will be applied for ", found_models," models")
 			else:
 				raise ValueError("You would like to apply ensemble learning and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+found_models)
 
+			models = model.copy()
+
 	#threshold = functions.findThreshold(model_name, distance_metric)
 

@@ -655,7 +660,7 @@ def find(img_path, db_path
 
 			input_shape_x = input_shape[0]; input_shape_y = input_shape[1]
 
-			img = functions.detectFace(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+			img = functions.preprocess_face(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
 			representation = model.predict(img)[0,:]
 
 			instance = []

@@ -668,11 +673,11 @@ def find(img_path, db_path
 			instance.append(employee)
 
 			for j in model_names:
-				model = models[j]
+				ensemble_model = models[j]
 
 				#input_shape = model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
 
-				input_shape = model.layers[0].input_shape
+				input_shape = ensemble_model.layers[0].input_shape
 
 				if type(input_shape) == list:
 					input_shape = input_shape[0][1:3]

@@ -681,8 +686,8 @@ def find(img_path, db_path
 
 				input_shape_x = input_shape[0]; input_shape_y = input_shape[1]
 
-				img = functions.detectFace(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
-				representation = model.predict(img)[0,:]
+				img = functions.preprocess_face(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+				representation = ensemble_model.predict(img)[0,:]
 				instance.append(representation)
 
 		#-------------------------------
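The ensemble_model rename matters because find() also takes a model argument and keeps a model variable for the single-model path; assigning model = models[j] inside the loop overwrote it. A standalone sketch of that shadowing hazard, with toy values:

	model = "caller-supplied model"
	models = {"VGG-Face": "vgg", "Facenet": "facenet"}

	for j in models:
		model = models[j]  # the old code silently replaced the outer 'model' here

	print(model)  # last ensemble member, not the caller's model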
@@ -715,19 +720,19 @@ def find(img_path, db_path
 
 		if model_name == 'Ensemble':
 			for j in model_names:
-				model = models[j]
+				ensemble_model = models[j]
 
-				#input_shape = model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
+				#input_shape = ensemble_model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
 
-				input_shape = model.layers[0].input_shape
+				input_shape = ensemble_model.layers[0].input_shape
 
 				if type(input_shape) == list:
 					input_shape = input_shape[0][1:3]
 				else:
 					input_shape = input_shape[1:3]
 
-				img = functions.detectFace(img_path, input_shape, enforce_detection = enforce_detection)
-				target_representation = model.predict(img)[0,:]
+				img = functions.preprocess_face(img_path, input_shape, enforce_detection = enforce_detection)
+				target_representation = ensemble_model.predict(img)[0,:]
 
 				for k in metric_names:
 					distances = []

@@ -818,7 +823,7 @@ def find(img_path, db_path
 
 			input_shape_x = input_shape[0]; input_shape_y = input_shape[1]
 
-			img = functions.detectFace(img_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+			img = functions.preprocess_face(img_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
 			target_representation = model.predict(img)[0,:]
 
 			distances = []
@@ -163,7 +163,7 @@ def get_opencv_path():
 
 	return path+"/data/"
 
-def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection = True):
+def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True):
 
 	img_path = ""
 
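The new name says what this helper actually does: it crops, aligns and resizes a face to the model's input size rather than just detecting one, and its flags cover every caller above; only the emotion model wants grayscale 48x48 input. Illustrative calls mirroring the analyze hunks:

	from deepface.commons import functions

	gray_face = functions.preprocess_face("dataset/img1.jpg", target_size = (48, 48), grayscale = True)
	rgb_face = functions.preprocess_face("dataset/img1.jpg", target_size = (224, 224), grayscale = False)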
@@ -112,7 +112,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 		employee = employees[index]
 		pbar.set_description("Finding embedding for %s" % (employee.split("/")[-1]))
 		embedding = []
-		img = functions.detectFace(employee, (input_shape_y, input_shape_x))
+		img = functions.preprocess_face(employee, (input_shape_y, input_shape_x))
 		img_representation = model.predict(img)[0,:]
 
 		embedding.append(employee)

@@ -222,7 +222,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 
 				if enable_face_analysis == True:
 
-					gray_img = functions.detectFace(custom_face, (48, 48), True)
+					gray_img = functions.preprocess_face(custom_face, (48, 48), True)
 					emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
 					emotion_predictions = emotion_model.predict(gray_img)[0,:]
 					sum_of_predictions = emotion_predictions.sum()

@@ -300,7 +300,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 
 					#-------------------------------
 
-					face_224 = functions.detectFace(custom_face, (224, 224), False)
+					face_224 = functions.preprocess_face(custom_face, (224, 224), False)
 
 					age_predictions = age_model.predict(face_224)[0,:]
 					apparent_age = Age.findApparentAge(age_predictions)

@@ -355,9 +355,9 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 				#-------------------------------
 				#face recognition
 
-				custom_face = functions.detectFace(custom_face, (input_shape_y, input_shape_x))
+				custom_face = functions.preprocess_face(custom_face, (input_shape_y, input_shape_x))
 
-				#check detectFace function handled
+				#check preprocess_face function handled
 				if custom_face.shape[1:3] == input_shape:
 					if df.shape[0] > 0: #if there are images to verify, apply face recognition
 						img1_representation = model.predict(custom_face)[0,:]
@@ -5,7 +5,10 @@ import os
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 #-----------------------------------------
 
-print("Bulk tests")
+dataset = [
+	['dataset/img1.jpg', 'dataset/img2.jpg', True],
+	['dataset/img5.jpg', 'dataset/img6.jpg', True]
+]
 
 print("-----------------------------------------")
 

@@ -24,20 +27,7 @@ print(df.head())
 
 print("-----------------------------------------")
 
-print("Bulk face recognition tests")
-
-dataset = [
-	['dataset/img1.jpg', 'dataset/img2.jpg', True],
-	['dataset/img5.jpg', 'dataset/img6.jpg', True]
-]
-
-resp_obj = DeepFace.verify(dataset)
-print(resp_obj["pair_1"]["verified"] == True)
-print(resp_obj["pair_2"]["verified"] == True)
-
-print("-----------------------------------------")
-
-print("Ensemble learning bulk")
+print("Ensemble for verify function")
 resp_obj = DeepFace.verify(dataset, model_name = "Ensemble")
 
 for i in range(0, len(dataset)):
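For reference, verify called with a list of pairs returns one result per pair keyed pair_1, pair_2, and so on; both the ensemble loop above and the relocated bulk checks below rely on that shape. A minimal sketch using the same dataset list as the tests:

	from deepface import DeepFace

	dataset = [
		['dataset/img1.jpg', 'dataset/img2.jpg', True],
		['dataset/img5.jpg', 'dataset/img6.jpg', True]
	]

	resp_obj = DeepFace.verify(dataset)
	for i in range(0, len(dataset)):
		print(resp_obj["pair_%d" % (i + 1)]["verified"])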
@@ -48,6 +38,14 @@ for i in range(0, len(dataset)):
 
 print("-----------------------------------------")
 
+print("Bulk face recognition tests")
+
+resp_obj = DeepFace.verify(dataset)
+print(resp_obj["pair_1"]["verified"] == True)
+print(resp_obj["pair_2"]["verified"] == True)
+
+print("-----------------------------------------")
+
 print("Bulk facial analysis tests")
 
 dataset = [

@@ -159,12 +157,16 @@ else:
 from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
 
 #-----------------------------------
+print("--------------------------")
+print("Verify function with passing pre-trained model")
 
 vggface_model = VGGFace.loadModel()
 resp_obj = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", model_name = "VGG-Face", model = vggface_model)
 print(resp_obj)
 
 #-----------------------------------
+print("--------------------------")
+print("Analyze function with passing pre-trained model")
 
 from deepface.extendedmodels import Age, Gender, Race, Emotion
 

@@ -180,3 +182,8 @@ facial_attribute_models["gender"] = gender_model
 facial_attribute_models["race"] = race_model
 
 resp_obj = DeepFace.analyze("dataset/img1.jpg", models=facial_attribute_models)
+print(resp_obj)
+
+#-----------------------------------
+
+print("--------------------------")