Merge pull request #135 from ma7555/master

Loads MTCNN once only + convert to RGB before inference + add better readability
Sefik Ilkin Serengil 2020-11-29 12:11:58 +03:00 committed by GitHub
commit ba480a323b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 47 additions and 86 deletions
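For orientation, a hedged usage sketch of the behaviour this commit targets: with the MTCNN detector backend selected, the detector is now built once per process (via functions.load_mtcnn()) and reused, instead of being re-instantiated inside every detection call. The image paths below are placeholders, and the response keys assume this version of the library.

# Hypothetical usage sketch (placeholder image paths).
# With detector_backend='mtcnn', verify() now calls functions.load_mtcnn() once,
# so repeated detections reuse the same MTCNN instance instead of rebuilding it.
from deepface import DeepFace

result = DeepFace.verify("img1.jpg", "img2.jpg",
                         model_name="VGG-Face",
                         distance_metric="cosine",
                         detector_backend="mtcnn")
print(result["verified"], result["distance"])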


@@ -10,18 +10,20 @@ import numpy as np
 import pandas as pd
 from tqdm import tqdm
 import json
-import cv2
-from keras import backend as K
-import keras
-import tensorflow as tf
 import pickle
-from deepface import DeepFace
 from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID
 from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.commons import functions, realtime, distance as dst
-def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True, detector_backend = 'opencv'):
+def DlibResNet_():
+    # this is not a must because it is very huge.
+    from deepface.basemodels.DlibResNet import DlibResNet
+    return DlibResNet()
+
+def verify(img1_path, img2_path = '', model_name='VGG-Face', distance_metric='cosine',
+        model=None, enforce_detection=True, detector_backend = 'opencv'):
     tic = time.time()
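The DlibResNet_ wrapper introduced above defers the heavy dlib import until the 'Dlib' model is actually requested. Below is a self-contained sketch of the same lazy-loading idea; the "expensive" import is only simulated and the names are made up.

# Lazy-loader sketch: the import and construction cost is paid only when the
# entry is looked up and called, mirroring how DlibResNet_ is used later on.
def build_big_model_():
    import random  # stand-in for an expensive import such as dlib
    return random.Random(0)

models = {'Big': build_big_model_}  # store the callable, not an instance
model = models['Big']()             # import + construction happen here, on demand
print(type(model).__name__)         # Random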
@@ -34,6 +36,9 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
     #------------------------------
+    if detector_backend == 'mtcnn':
+        functions.load_mtcnn()
+
     resp_objects = []
     if model_name == 'Ensemble':
@@ -199,33 +204,23 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
     #ensemble learning disabled
     if model == None:
-        if model_name == 'VGG-Face':
-            print("Using VGG-Face model backend and", distance_metric,"distance.")
-            model = VGGFace.loadModel()
-        elif model_name == 'OpenFace':
-            print("Using OpenFace model backend", distance_metric,"distance.")
-            model = OpenFace.loadModel()
-        elif model_name == 'Facenet':
-            print("Using Facenet model backend", distance_metric,"distance.")
-            model = Facenet.loadModel()
-        elif model_name == 'DeepFace':
-            print("Using FB DeepFace model backend", distance_metric,"distance.")
-            model = FbDeepFace.loadModel()
-        elif model_name == 'DeepID':
-            print("Using DeepID2 model backend", distance_metric,"distance.")
-            model = DeepID.loadModel()
-        elif model_name == 'Dlib':
-            print("Using Dlib ResNet model backend", distance_metric,"distance.")
-            from deepface.basemodels.DlibResNet import DlibResNet #this is not a must because it is very huge.
-            model = DlibResNet()
+        models = {
+            'VGG-Face': VGGFace.loadModel,
+            'OpenFace': OpenFace.loadModel,
+            'Facenet': Facenet.loadModel,
+            'DeepFace': FbDeepFace.loadModel,
+            'DeepID': DeepID.loadModel,
+            'Dlib': DlibResNet_
+        }
+
+        model = models.get(model_name)
+
+        if model:
+            model = model()
+            print('Using {} model backend and {} distance'.format(model_name, distance_metric))
         else:
-            raise ValueError("Invalid model_name passed - ", model_name)
+            raise ValueError('Invalid model_name passed - {}'.format(model_name))
     else: #model != None
         print("Already built model is passed")
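The hunk above replaces a six-branch if/elif ladder with a lookup table of loader callables resolved via dict.get. A minimal sketch of that dispatch pattern, with made-up loader names rather than deepface's:

# Dispatch-table sketch: unknown names fall through to a ValueError,
# matching the new model-selection logic in verify().
def load_a(): return "model-A"
def load_b(): return "model-B"

loaders = {'A': load_a, 'B': load_b}

def build(name):
    loader = loaders.get(name)
    if loader:
        return loader()
    raise ValueError('Invalid model_name passed - {}'.format(name))

print(build('A'))  # model-A
# build('C')       # would raise ValueError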
@@ -344,7 +339,6 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
     return resp_obj
     #return resp_objects
-
 def analyze(img_path, actions = [], models = {}, enforce_detection = True, detector_backend = 'opencv'):
     if type(img_path) == list:
@@ -513,7 +507,6 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True, detec
     return resp_obj
     #return resp_objects
-
 def detectFace(img_path, detector_backend = 'opencv'):
     img = functions.preprocess_face(img = img_path, detector_backend = detector_backend)[0] #preprocess_face returns (1, 224, 224, 3)
     return img[:, :, ::-1] #bgr to rgb
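detectFace flips the channel order on return because OpenCV decodes images as BGR while most consumers expect RGB. A tiny numpy sketch of what the [:, :, ::-1] slice does:

import numpy as np

# A single 'pixel' stored as BGR; reversing the last axis yields RGB.
bgr = np.array([[[255, 128, 0]]], dtype=np.uint8)  # blue=255, green=128, red=0
rgb = bgr[:, :, ::-1]
print(rgb[0, 0])  # [  0 128 255]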


@@ -20,6 +20,10 @@ import bz2
 from deepface.commons import distance
 from mtcnn import MTCNN #0.1.0
+def load_mtcnn():
+    global mtcnn_detector
+    mtcnn_detector = MTCNN()
+
 def loadBase64Img(uri):
     encoded_data = uri.split(',')[1]
     nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
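load_mtcnn() builds the detector once and stores it in a module-level global that detect_face and align_face reuse. Below is a dependency-free sketch of the same load-once pattern, written as a slightly more defensive variant that also guards against double initialisation; the names are made up.

# Load-once sketch: the expensive object is created on first use and cached in
# a module-level global, like load_mtcnn() caching the MTCNN instance.
_detector = None

def _build_detector():  # stand-in for MTCNN()
    return object()

def get_detector():
    global _detector
    if _detector is None:
        _detector = _build_detector()
    return _detector

assert get_detector() is get_detector()  # the same instance on every call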
@@ -40,55 +44,18 @@ def initializeFolder():
 def findThreshold(model_name, distance_metric):
-    threshold = 0.40
-    if model_name == 'VGG-Face':
-        if distance_metric == 'cosine':
-            threshold = 0.40
-        elif distance_metric == 'euclidean':
-            threshold = 0.55
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.75
-    elif model_name == 'OpenFace':
-        if distance_metric == 'cosine':
-            threshold = 0.10
-        elif distance_metric == 'euclidean':
-            threshold = 0.55
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.55
-    elif model_name == 'Facenet':
-        if distance_metric == 'cosine':
-            threshold = 0.40
-        elif distance_metric == 'euclidean':
-            threshold = 10
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.80
-    elif model_name == 'DeepFace':
-        if distance_metric == 'cosine':
-            threshold = 0.23
-        elif distance_metric == 'euclidean':
-            threshold = 64
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.64
-    elif model_name == 'DeepID':
-        if distance_metric == 'cosine':
-            threshold = 0.015
-        elif distance_metric == 'euclidean':
-            threshold = 45
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.17
-    elif model_name == 'Dlib':
-        if distance_metric == 'cosine':
-            threshold = 0.07
-        elif distance_metric == 'euclidean':
-            threshold = 0.60
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.60
+    base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}
+
+    thresholds = {
+        'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75},
+        'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55},
+        'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80},
+        'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64},
+        'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17},
+        'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.6}
+    }
+
+    threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
+
     return threshold
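The rewritten findThreshold resolves the value with two chained .get() calls: an unknown model falls back to base_threshold, an unknown metric to 0.4. A quick check of that lookup behaviour on a trimmed-down table:

# Chained-get lookup: model -> metric -> value, with a per-model fallback
# table and a final scalar default, as in the new findThreshold.
base = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}
thresholds = {'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80}}

print(thresholds.get('Facenet', base).get('euclidean', 0.4))       # 10
print(thresholds.get('UnknownModel', base).get('cosine', 0.4))     # 0.4 (base table)
print(thresholds.get('Facenet', base).get('unknown_metric', 0.4))  # 0.4 (default)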
@@ -277,9 +244,9 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
     elif detector_backend == 'mtcnn':
-        mtcnn_detector = MTCNN()
-        detections = mtcnn_detector.detect_faces(img)
+        # mtcnn_detector = MTCNN()
+        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        detections = mtcnn_detector.detect_faces(img_rgb)
         if len(detections) > 0:
             detection = detections[0]
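MTCNN expects RGB input while OpenCV's imread returns BGR, hence the cv2.cvtColor call before detection. A short sketch of that conversion step; the image path is a placeholder, and opencv-python plus mtcnn are assumed to be installed.

import cv2
from mtcnn import MTCNN

detector = MTCNN()                    # in deepface this is now the shared global detector
img_bgr = cv2.imread("face.jpg")      # placeholder path; OpenCV decodes to BGR
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
detections = detector.detect_faces(img_rgb)
if detections:
    x, y, w, h = detections[0]["box"]
    face = img_bgr[y:y + h, x:x + w]  # crop the face from the original BGR image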
@@ -288,7 +255,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
             return detected_face
         else: #if no face detected
-            if enforce_detection != True:
+            if not enforce_detection:
                 return img
             else:
@@ -432,8 +399,9 @@ def align_face(img, detector_backend = 'opencv'):
     elif detector_backend == 'mtcnn':
-        mtcnn_detector = MTCNN()
-        detections = mtcnn_detector.detect_faces(img)
+        # mtcnn_detector = MTCNN()
+        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        detections = mtcnn_detector.detect_faces(img_rgb)
         if len(detections) > 0:
             detection = detections[0]