issue 89 solved

commit 4f7c5273c9 (parent 414d58b508)
Author: Şefik Serengil
Date: 2020-09-03 09:13:35 +03:00

4 changed files with 56 additions and 44 deletions

deepface/DeepFace.py

@@ -105,8 +105,8 @@ def verify(img1_path, img2_path=''
 	input_shape = input_shape[1:3]

-	img1 = functions.detectFace(img1_path, input_shape, enforce_detection = enforce_detection)
-	img2 = functions.detectFace(img2_path, input_shape, enforce_detection = enforce_detection)
+	img1 = functions.preprocess_face(img1_path, input_shape, enforce_detection = enforce_detection)
+	img2 = functions.preprocess_face(img2_path, input_shape, enforce_detection = enforce_detection)

 	img1_representation = custom_model.predict(img1)[0,:]
 	img2_representation = custom_model.predict(img2)[0,:]
@@ -274,8 +274,8 @@ def verify(img1_path, img2_path=''
 	#----------------------
 	#crop and align faces

-	img1 = functions.detectFace(img1_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
-	img2 = functions.detectFace(img2_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+	img1 = functions.preprocess_face(img1_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+	img2 = functions.preprocess_face(img2_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)

 	#----------------------
 	#find embeddings
@@ -422,7 +422,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 		if action == 'emotion':
 			emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

-			img = functions.detectFace(img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection)
+			img = functions.preprocess_face(img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection)

 			emotion_predictions = emotion_model.predict(img)[0,:]
@@ -445,7 +445,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 		elif action == 'age':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images

 			#print("age prediction")
 			age_predictions = age_model.predict(img_224)[0,:]
 			apparent_age = Age.findApparentAge(age_predictions)
@@ -454,7 +454,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 		elif action == 'gender':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images

 			#print("gender prediction")
 			gender_prediction = gender_model.predict(img_224)[0,:]
@@ -468,7 +468,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 		elif action == 'race':
 			if img_224 is None:
-				img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
+				img_224 = functions.preprocess_face(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images

 			race_predictions = race_model.predict(img_224)[0,:]
 			race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
@@ -516,12 +516,15 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
 def detectFace(img_path):
-	img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
+	img = functions.preprocess_face(img_path)[0] #preprocess_face returns (1, 224, 224, 3)
 	return img[:, :, ::-1] #bgr to rgb

 def find(img_path, db_path
 	, model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True):

+	model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
+	metric_names = ['cosine', 'euclidean', 'euclidean_l2']

 	tic = time.time()

 	if type(img_path) == list:
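Note: this hunk repurposes the public name. DeepFace.detectFace is now a thin wrapper over the renamed functions.preprocess_face. A minimal usage sketch under this commit (shapes taken from the inline comments; dataset/ paths are the repo's sample images):

from deepface import DeepFace
from deepface.commons import functions

# public wrapper: detect, align and crop, then flip BGR -> RGB for display
face = DeepFace.detectFace("dataset/img1.jpg")          # (224, 224, 3)

# renamed internal routine: returns a model-ready batch
batch = functions.preprocess_face("dataset/img1.jpg")   # (1, 224, 224, 3)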
@@ -560,8 +563,6 @@ def find(img_path, db_path
 			import lightgbm as lgb #lightgbm==2.3.1

-			model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
-			metric_names = ['cosine', 'euclidean', 'euclidean_l2']

 			models = {}

 		pbar = tqdm(range(0, len(model_names)), desc='Face recognition models')
@@ -586,6 +587,8 @@ def find(img_path, db_path
 		print("Already built model is passed")

+		if model_name == 'Ensemble':
+			import lightgbm as lgb #lightgbm==2.3.1

 			#validate model dictionary because it might be passed from input as pre-trained
@@ -597,6 +600,8 @@ def find(img_path, db_path
 				print("Ensemble learning will be applied for ", found_models," models")
 			else:
 				raise ValueError("You would like to apply ensemble learning and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+found_models)

+			models = model.copy()

 	#threshold = functions.findThreshold(model_name, distance_metric)
@@ -655,7 +660,7 @@ def find(img_path, db_path
 				input_shape_x = input_shape[0]; input_shape_y = input_shape[1]

-				img = functions.detectFace(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+				img = functions.preprocess_face(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
 				representation = model.predict(img)[0,:]

 				instance = []
@@ -668,11 +673,11 @@ def find(img_path, db_path
 				instance.append(employee)

 				for j in model_names:
-					model = models[j]
+					ensemble_model = models[j]

 					#input_shape = model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
-					input_shape = model.layers[0].input_shape
+					input_shape = ensemble_model.layers[0].input_shape

 					if type(input_shape) == list:
 						input_shape = input_shape[0][1:3]
@@ -681,8 +686,8 @@ def find(img_path, db_path
 					input_shape_x = input_shape[0]; input_shape_y = input_shape[1]

-					img = functions.detectFace(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
-					representation = model.predict(img)[0,:]
+					img = functions.preprocess_face(employee, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+					representation = ensemble_model.predict(img)[0,:]

 					instance.append(representation)

 			#-------------------------------
@@ -715,19 +720,19 @@ def find(img_path, db_path
 		if model_name == 'Ensemble':
 			for j in model_names:
-				model = models[j]
+				ensemble_model = models[j]

-				#input_shape = model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
+				#input_shape = ensemble_model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.

-				input_shape = model.layers[0].input_shape
+				input_shape = ensemble_model.layers[0].input_shape

 				if type(input_shape) == list:
 					input_shape = input_shape[0][1:3]
 				else:
 					input_shape = input_shape[1:3]

-				img = functions.detectFace(img_path, input_shape, enforce_detection = enforce_detection)
-				target_representation = model.predict(img)[0,:]
+				img = functions.preprocess_face(img_path, input_shape, enforce_detection = enforce_detection)
+				target_representation = ensemble_model.predict(img)[0,:]

 				for k in metric_names:
 					distances = []
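Note: the model to ensemble_model rename in these hunks is not cosmetic. The inner ensemble loop was rebinding the name model, clobbering the single-model instance that find() still uses outside the loop. A hypothetical, reduced illustration of that shadowing hazard (names invented for the example, not deepface code):

model = "caller-supplied model"                       # outer binding, needed after the loop
models = {"VGG-Face": "vgg", "Facenet": "facenet"}

for j in models:
	model = models[j]    # old code: rebinding silently overwrites the outer name
print(model)                 # prints 'facenet', no longer the caller's model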
@@ -818,7 +823,7 @@ def find(img_path, db_path
 		input_shape_x = input_shape[0]; input_shape_y = input_shape[1]

-		img = functions.detectFace(img_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)
+		img = functions.preprocess_face(img_path, (input_shape_y, input_shape_x), enforce_detection = enforce_detection)

 		target_representation = model.predict(img)[0,:]

 		distances = []
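The list-vs-tuple guard around layers[0].input_shape recurs in several hunks above; the inline comments attribute it to Keras/TensorFlow version differences. A hedged sketch of that check factored into a helper (resolve_input_shape is a hypothetical name, not part of deepface):

def resolve_input_shape(model):
	# some tf.keras versions return (None, 224, 224, 3) from layers[0].input_shape,
	# others wrap it in a list: [(None, 224, 224, 3)]
	input_shape = model.layers[0].input_shape
	if type(input_shape) == list:
		input_shape = input_shape[0][1:3]
	else:
		input_shape = input_shape[1:3]
	return input_shape  # (height, width), e.g. (224, 224)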

deepface/commons/functions.py

@@ -163,7 +163,7 @@ def get_opencv_path():
 	return path+"/data/"

-def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection = True):
+def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True):

 	img_path = ""

deepface/commons/realtime.py

@@ -112,7 +112,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 		employee = employees[index]
 		pbar.set_description("Finding embedding for %s" % (employee.split("/")[-1]))
 		embedding = []
-		img = functions.detectFace(employee, (input_shape_y, input_shape_x))
+		img = functions.preprocess_face(employee, (input_shape_y, input_shape_x))
 		img_representation = model.predict(img)[0,:]

 		embedding.append(employee)
@@ -222,7 +222,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 				if enable_face_analysis == True:

-					gray_img = functions.detectFace(custom_face, (48, 48), True)
+					gray_img = functions.preprocess_face(custom_face, (48, 48), True)
 					emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
 					emotion_predictions = emotion_model.predict(gray_img)[0,:]
 					sum_of_predictions = emotion_predictions.sum()
@@ -300,7 +300,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 					#-------------------------------

-					face_224 = functions.detectFace(custom_face, (224, 224), False)
+					face_224 = functions.preprocess_face(custom_face, (224, 224), False)

 					age_predictions = age_model.predict(face_224)[0,:]
 					apparent_age = Age.findApparentAge(age_predictions)
@@ -355,9 +355,9 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 				#-------------------------------
 				#face recognition

-				custom_face = functions.detectFace(custom_face, (input_shape_y, input_shape_x))
+				custom_face = functions.preprocess_face(custom_face, (input_shape_y, input_shape_x))

-				#check detectFace function handled
+				#check preprocess_face function handled
 				if custom_face.shape[1:3] == input_shape:
 					if df.shape[0] > 0: #if there are images to verify, apply face recognition
 						img1_representation = model.predict(custom_face)[0,:]
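The emotion branch above normalizes raw model outputs into percentages via sum_of_predictions. A standalone sketch of that flow outside the streaming loop, assuming this commit's module layout and the repo's dataset/ sample images:

from deepface.commons import functions
from deepface.extendedmodels import Emotion

emotion_model = Emotion.loadModel()
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

# grayscale (48, 48) input, same positional call as in analysis() above
gray_img = functions.preprocess_face("dataset/img1.jpg", (48, 48), True)
emotion_predictions = emotion_model.predict(gray_img)[0,:]
sum_of_predictions = emotion_predictions.sum()

for i, label in enumerate(emotion_labels):
	print(label, round(100 * emotion_predictions[i] / sum_of_predictions, 2))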

tests/unit_tests.py

@@ -5,7 +5,10 @@ import os
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 #-----------------------------------------

+print("Bulk tests")

+dataset = [
+	['dataset/img1.jpg', 'dataset/img2.jpg', True],
+	['dataset/img5.jpg', 'dataset/img6.jpg', True]
+]

 print("-----------------------------------------")
@@ -24,20 +27,7 @@ print(df.head())
 print("-----------------------------------------")

-print("Bulk face recognition tests")
-dataset = [
-	['dataset/img1.jpg', 'dataset/img2.jpg', True],
-	['dataset/img5.jpg', 'dataset/img6.jpg', True]
-]
-resp_obj = DeepFace.verify(dataset)
-print(resp_obj["pair_1"]["verified"] == True)
-print(resp_obj["pair_2"]["verified"] == True)
-print("-----------------------------------------")
-print("Ensemble learning bulk")
+print("Ensemble for verify function")

 resp_obj = DeepFace.verify(dataset, model_name = "Ensemble")

 for i in range(0, len(dataset)):
@@ -48,6 +38,14 @@ for i in range(0, len(dataset)):
 print("-----------------------------------------")

+print("Bulk face recognition tests")

+resp_obj = DeepFace.verify(dataset)
+print(resp_obj["pair_1"]["verified"] == True)
+print(resp_obj["pair_2"]["verified"] == True)

+print("-----------------------------------------")

 print("Bulk facial analysis tests")

 dataset = [
@@ -159,12 +157,16 @@ else:
 	from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

 #-----------------------------------
 print("--------------------------")

+print("Verify function with passing pre-trained model")
+vggface_model = VGGFace.loadModel()
+resp_obj = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", model_name = "VGG-Face", model = vggface_model)
+print(resp_obj)

 #-----------------------------------
 print("--------------------------")

 print("Analyze function with passing pre-trained model")
 from deepface.extendedmodels import Age, Gender, Race, Emotion
@@ -180,3 +182,8 @@ facial_attribute_models["gender"] = gender_model
 facial_attribute_models["race"] = race_model

 resp_obj = DeepFace.analyze("dataset/img1.jpg", models=facial_attribute_models)
 print(resp_obj)

+#-----------------------------------
+print("--------------------------")