From 45f17417affa349ac19f285af33e0da118cbd3bc Mon Sep 17 00:00:00 2001 From: serengil Date: Mon, 30 Nov 2020 16:50:49 +0300 Subject: [PATCH] boost verify --- deepface/DeepFace.py | 189 ++------------------------------ deepface/basemodels/Boosting.py | 179 ++++++++++++++++++++++++++++++ deepface/commons/functions.py | 18 --- 3 files changed, 190 insertions(+), 196 deletions(-) create mode 100644 deepface/basemodels/Boosting.py diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py index b91d7bf..891e990 100644 --- a/deepface/DeepFace.py +++ b/deepface/DeepFace.py @@ -4,13 +4,12 @@ warnings.filterwarnings("ignore") import time import os from os import path -from pathlib import Path import numpy as np import pandas as pd from tqdm import tqdm import pickle -from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID, DlibWrapper +from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID, DlibWrapper, Boosting from deepface.extendedmodels import Age, Gender, Race, Emotion from deepface.commons import functions, realtime, distance as dst @@ -48,150 +47,19 @@ def verify(img1_path, img2_path = '', model_name = 'VGG-Face', distance_metric = resp_objects = [] - if model_name == 'Ensemble': - print("Ensemble learning enabled") - - import lightgbm as lgb #lightgbm==2.3.1 - - if model == None: - model = {} - - model_pbar = tqdm(range(0, 4), desc='Face recognition models') - - for index in model_pbar: - - if index == 0: - model_pbar.set_description("Loading VGG-Face") - model["VGG-Face"] = build_model('VGG-Face') - elif index == 1: - model_pbar.set_description("Loading Google FaceNet") - model["Facenet"] = build_model('Facenet') - elif index == 2: - model_pbar.set_description("Loading OpenFace") - model["OpenFace"] = build_model('OpenFace') - elif index == 3: - model_pbar.set_description("Loading Facebook DeepFace") - model["DeepFace"] = build_model('DeepFace') - - #-------------------------- - #validate model dictionary because it might be passed from input as pre-trained - - found_models = [] - for key, value in model.items(): - found_models.append(key) - - if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models): - print("Ensemble learning will be applied for ", found_models," models") - else: - raise ValueError("You would like to apply ensemble learning and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+found_models) - - #-------------------------- - - model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"] - metrics = ["cosine", "euclidean", "euclidean_l2"] - - pbar = tqdm(range(0,len(img_list)), desc='Verification') - - #for instance in img_list: - for index in pbar: - instance = img_list[index] - - if type(instance) == list and len(instance) >= 2: - img1_path = instance[0] - img2_path = instance[1] - - ensemble_features = []; ensemble_features_string = "[" - - for i in model_names: - custom_model = model[i] - - #input_shape = custom_model.layers[0].input_shape[1:3] #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. 
+ #-------------------------------- - input_shape = custom_model.layers[0].input_shape - - if type(input_shape) == list: - input_shape = input_shape[0][1:3] - else: - input_shape = input_shape[1:3] - - - img1 = functions.preprocess_face(img = img1_path, target_size = input_shape, enforce_detection = enforce_detection, detector_backend = detector_backend) - img2 = functions.preprocess_face(img = img2_path, target_size = input_shape, enforce_detection = enforce_detection, detector_backend = detector_backend) - - img1_representation = custom_model.predict(img1)[0,:] - img2_representation = custom_model.predict(img2)[0,:] - - for j in metrics: - if j == 'cosine': - distance = dst.findCosineDistance(img1_representation, img2_representation) - elif j == 'euclidean': - distance = dst.findEuclideanDistance(img1_representation, img2_representation) - elif j == 'euclidean_l2': - distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation)) - - if i == 'OpenFace' and j == 'euclidean': #this returns same with OpenFace - euclidean_l2 - continue - else: - - ensemble_features.append(distance) - - if len(ensemble_features) > 1: - ensemble_features_string += ", " - ensemble_features_string += str(distance) - - #print("ensemble_features: ", ensemble_features) - ensemble_features_string += "]" - - #------------------------------- - - deepface_ensemble = functions.boosting_method() - - #--------------------------- - - prediction = deepface_ensemble.predict(np.expand_dims(np.array(ensemble_features), axis=0))[0] - - verified = np.argmax(prediction) == 1 - - score = prediction[np.argmax(prediction)] - - #print("verified: ", verified,", score: ", score) - - resp_obj = { - "verified": verified - , "score": score - , "distance": ensemble_features_string - , "model": ["VGG-Face", "Facenet", "OpenFace", "DeepFace"] - , "similarity_metric": ["cosine", "euclidean", "euclidean_l2"] - } - - if bulkProcess == True: - resp_objects.append(resp_obj) - else: - return resp_obj - - #------------------------------- - - if bulkProcess == True: - - resp_obj = {} - for i in range(0, len(resp_objects)): - resp_item = resp_objects[i] - resp_obj["pair_%d" % (i+1)] = resp_item - - return resp_obj - - return None + if model_name == 'Ensemble': + return Boosting.verify(model = model, img_list = img_list, bulkProcess = bulkProcess, enforce_detection = enforce_detection, detector_backend = detector_backend) #ensemble learning block end + #-------------------------------- #ensemble learning disabled if model == None: model = build_model(model_name) - """else: #model != None - print("Already built model is passed")""" - #------------------------------ #face recognition models have different size of inputs #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. 
@@ -458,28 +326,8 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', if model == None: if model_name == 'Ensemble': - print("Ensemble learning enabled") - #TODO: include DeepID in ensemble method - - import lightgbm as lgb #lightgbm==2.3.1 - - models = {} - - pbar = tqdm(range(0, len(model_names)), desc='Face recognition models') - - for index in pbar: - if index == 0: - pbar.set_description("Loading VGG-Face") - models['VGG-Face'] = build_model('VGG-Face') - elif index == 1: - pbar.set_description("Loading FaceNet") - models['Facenet'] = build_model('Facenet') - elif index == 2: - pbar.set_description("Loading OpenFace") - models['OpenFace'] = build_model('OpenFace') - elif index == 3: - pbar.set_description("Loading DeepFace") - models['DeepFace'] = build_model('DeepFace') + print("Ensemble learning enabled") + models = Boosting.loadModel() else: #model is not ensemble model = build_model(model_name) @@ -488,24 +336,10 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', print("Already built model is passed") if model_name == 'Ensemble': - - import lightgbm as lgb #lightgbm==2.3.1 - - #validate model dictionary because it might be passed from input as pre-trained - - found_models = [] - for key, value in model.items(): - found_models.append(key) - - if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models): - print("Ensemble learning will be applied for ", found_models," models") - else: - raise ValueError("You would like to apply ensemble learning and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+found_models) + Boosting.validate_model(model) models = model.copy() - - #threshold = functions.findThreshold(model_name, distance_metric) - + #--------------------------------------- file_name = "representations_%s.pkl" % (model_name) @@ -666,7 +500,7 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', #---------------------------------- #lightgbm model - deepface_ensemble = functions.boosting_method() + deepface_ensemble = Boosting.build_gbm() y = deepface_ensemble.predict(x) @@ -747,7 +581,7 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', raise ValueError("Passed db_path does not exist!") return None - + def stream(db_path = '', model_name ='VGG-Face', distance_metric = 'cosine', enable_face_analysis = True): functions.initialize_detector(detector_backend = 'opencv') @@ -790,4 +624,3 @@ def initialize_input(img1_path, img2_path = None): #main functions.initializeFolder() - diff --git a/deepface/basemodels/Boosting.py b/deepface/basemodels/Boosting.py new file mode 100644 index 0000000..6311064 --- /dev/null +++ b/deepface/basemodels/Boosting.py @@ -0,0 +1,179 @@ +from deepface import DeepFace +from tqdm import tqdm +import os +from os import path +from pathlib import Path +import numpy as np +import lightgbm as lgb #lightgbm==2.3.1 + +from deepface.commons import functions, distance as dst + +def loadModel(): + model = {} + + model_pbar = tqdm(range(0, 4), desc='Face recognition models') + + for index in model_pbar: + + if index == 0: + model_pbar.set_description("Loading VGG-Face") + model["VGG-Face"] = DeepFace.build_model('VGG-Face') + elif index == 1: + model_pbar.set_description("Loading Google FaceNet") + model["Facenet"] = DeepFace.build_model('Facenet') + elif index == 2: + model_pbar.set_description("Loading OpenFace") + 
model["OpenFace"] = DeepFace.build_model('OpenFace') + elif index == 3: + model_pbar.set_description("Loading Facebook DeepFace") + model["DeepFace"] = DeepFace.build_model('DeepFace') + + return model + +def validate_model(model): + #validate model dictionary because it might be passed from input as pre-trained + found_models = [] + for key, value in model.items(): + found_models.append(key) + + if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models): + print("Ensemble learning will be applied for ", found_models," models") + else: + raise ValueError("You would like to apply ensemble learning and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+found_models) + +def build_gbm(): + + home = str(Path.home()) + + if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True: + print("face-recognition-ensemble-model.txt will be downloaded...") + url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt' + output = home+'/.deepface/weights/face-recognition-ensemble-model.txt' + gdown.download(url, output, quiet=False) + + ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt' + + deepface_ensemble = lgb.Booster(model_file = ensemble_model_path) + + return deepface_ensemble + +def verify(model, img_list, bulkProcess, enforce_detection, detector_backend): + print("Ensemble learning enabled") + + if model == None: + model = loadModel() + + validate_model(model) + + #-------------------------- + + model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"] + metrics = ["cosine", "euclidean", "euclidean_l2"] + + resp_objects = [] + + #-------------------------- + + if model == None: + model = loadModel() + + #-------------------------- + + validate_model(model) + + #-------------------------- + + pbar = tqdm(range(0,len(img_list)), desc='Verification') + + for index in pbar: + instance = img_list[index] + + if type(instance) == list and len(instance) >= 2: + img1_path = instance[0] + img2_path = instance[1] + + ensemble_features = []; ensemble_features_string = "[" + + for i in model_names: + custom_model = model[i] + + input_shape = custom_model.layers[0].input_shape + + if type(input_shape) == list: + input_shape = input_shape[0][1:3] + else: + input_shape = input_shape[1:3] + + #---------------------------------- + + img1 = functions.preprocess_face(img = img1_path, target_size = input_shape + , enforce_detection = enforce_detection + , detector_backend = detector_backend) + + img2 = functions.preprocess_face(img = img2_path, target_size = input_shape + , enforce_detection = enforce_detection + , detector_backend = detector_backend) + + img1_representation = custom_model.predict(img1)[0,:] + img2_representation = custom_model.predict(img2)[0,:] + + for j in metrics: + if j == 'cosine': + distance = dst.findCosineDistance(img1_representation, img2_representation) + elif j == 'euclidean': + distance = dst.findEuclideanDistance(img1_representation, img2_representation) + elif j == 'euclidean_l2': + distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation)) + + #------------------------ + + #this returns same with OpenFace - euclidean_l2 + if i == 'OpenFace' and j == 'euclidean': + continue + else: + ensemble_features.append(distance) + + if len(ensemble_features) > 1: + ensemble_features_string += ", " 
+ + ensemble_features_string += str(distance) + + ensemble_features_string += "]" + + #------------------------------- + + deepface_ensemble = build_gbm() + + #------------------------------- + + prediction = deepface_ensemble.predict(np.expand_dims(np.array(ensemble_features), axis=0))[0] + + verified = np.argmax(prediction) == 1 + + score = prediction[np.argmax(prediction)] + + #print("verified: ", verified,", score: ", score) + + resp_obj = { + "verified": verified + , "score": score + , "distance": ensemble_features_string + , "model": ["VGG-Face", "Facenet", "OpenFace", "DeepFace"] + , "similarity_metric": ["cosine", "euclidean", "euclidean_l2"] + } + + if bulkProcess == True: + resp_objects.append(resp_obj) + else: + return resp_obj + + if bulkProcess == True: + resp_obj = {} + + for i in range(0, len(resp_objects)): + resp_item = resp_objects[i] + resp_obj["pair_%d" % (i+1)] = resp_item + + return resp_obj + + \ No newline at end of file diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py index 89ba10e..645ab83 100644 --- a/deepface/commons/functions.py +++ b/deepface/commons/functions.py @@ -447,21 +447,3 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete img_pixels /= 255 #normalize input in [0, 1] return img_pixels - -def boosting_method(): - - import lightgbm as lgb #lightgbm==2.3.1 - - home = str(Path.home()) - - if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True: - print("face-recognition-ensemble-model.txt will be downloaded...") - url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt' - output = home+'/.deepface/weights/face-recognition-ensemble-model.txt' - gdown.download(url, output, quiet=False) - - ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt' - - deepface_ensemble = lgb.Booster(model_file = ensemble_model_path) - - return deepface_ensemble
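A quick way to exercise the refactored path after this change is to go through the public API: with model_name = 'Ensemble', DeepFace.verify now delegates to Boosting.verify. A minimal sketch, assuming lightgbm==2.3.1 and gdown are installed and that the two image paths below (placeholders) point to real files; the four recognition models and the ensemble booster weights are downloaded on first use:

from deepface import DeepFace

#'Ensemble' delegates to Boosting.verify under the hood after this patch
#img1.jpg and img2.jpg are placeholder paths, not files shipped with the repository
resp = DeepFace.verify("img1.jpg", "img2.jpg", model_name = "Ensemble")

#for a single pair the response is a dict with verified, score, distance, model and similarity_metric keys
print(resp["verified"], resp["score"])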