mirror of https://github.com/serengil/deepface.git
synced 2025-06-08 12:35:22 +00:00

Merge pull request #135 from ma7555/master

Loads MTCNN once only + convert to RGB before inference + add better readability

This commit is contained in:
commit ba480a323b
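Both behavioural changes in this merge are small and easy to isolate: build the MTCNN detector once and reuse it, and convert OpenCV's BGR frames to RGB before calling detect_faces. A minimal standalone sketch of that pattern (not deepface code; "face.jpg" is a placeholder path):

import cv2
from mtcnn import MTCNN  # mtcnn 0.1.0, as pinned in the diff

mtcnn_detector = MTCNN()  # built once at module level, reused for every frame

def detect_with_mtcnn(img_bgr):
    # OpenCV loads images in BGR order; MTCNN expects RGB input
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    return mtcnn_detector.detect_faces(img_rgb)

detections = detect_with_mtcnn(cv2.imread("face.jpg"))  # placeholder image
print(detections)  # list of dicts with 'box', 'confidence' and 'keypoints'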
deepface/DeepFace.py

@@ -10,18 +10,20 @@ import numpy as np
 import pandas as pd
 from tqdm import tqdm
 import json
 import cv2
 from keras import backend as K
 import keras
 import tensorflow as tf
 import pickle
 
 from deepface import DeepFace
 from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID
 from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.commons import functions, realtime, distance as dst
 
-def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True, detector_backend = 'opencv'):
+def DlibResNet_():
+    from deepface.basemodels.DlibResNet import DlibResNet
+    return DlibResNet()
+
+def verify(img1_path, img2_path = '', model_name='VGG-Face', distance_metric='cosine',
+    model=None, enforce_detection=True, detector_backend = 'opencv'):
 
     # this is not a must because it is very huge.
 
     tic = time.time()
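The new DlibResNet_ helper wraps the Dlib backend so its heavy import only happens when that backend is actually chosen; importing deepface itself stays cheap. A generic sketch of the same deferred-import idea, with numpy standing in for the heavy dependency:

def numpy_backend_():
    import numpy as np  # deferred: the import cost is paid only when the loader runs
    return np.zeros((128,))

loaders = {'numpy-stub': numpy_backend_}  # stored uncalled, nothing is loaded yet

loader = loaders.get('numpy-stub')
model = loader() if loader else None
print(model.shape)  # (128,)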
@@ -34,6 +36,9 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
 
     #------------------------------
 
+    if detector_backend == 'mtcnn':
+        functions.load_mtcnn()
+
     resp_objects = []
 
     if model_name == 'Ensemble':
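From the caller's side nothing changes; passing detector_backend = 'mtcnn' now builds the detector once via functions.load_mtcnn() before any image is processed, instead of once per image. A usage sketch (the image paths are placeholders):

from deepface import DeepFace

result = DeepFace.verify("img1.jpg", "img2.jpg",
    model_name = 'VGG-Face',
    distance_metric = 'cosine',
    detector_backend = 'mtcnn')  # MTCNN is loaded once, then reused for both images

print(result)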
@@ -199,33 +204,23 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
     #ensemble learning disabled
 
     if model == None:
-        if model_name == 'VGG-Face':
-            print("Using VGG-Face model backend and", distance_metric,"distance.")
-            model = VGGFace.loadModel()
-
-        elif model_name == 'OpenFace':
-            print("Using OpenFace model backend", distance_metric,"distance.")
-            model = OpenFace.loadModel()
-
-        elif model_name == 'Facenet':
-            print("Using Facenet model backend", distance_metric,"distance.")
-            model = Facenet.loadModel()
-
-        elif model_name == 'DeepFace':
-            print("Using FB DeepFace model backend", distance_metric,"distance.")
-            model = FbDeepFace.loadModel()
-
-        elif model_name == 'DeepID':
-            print("Using DeepID2 model backend", distance_metric,"distance.")
-            model = DeepID.loadModel()
-
-        elif model_name == 'Dlib':
-            print("Using Dlib ResNet model backend", distance_metric,"distance.")
-            from deepface.basemodels.DlibResNet import DlibResNet #this is not a must because it is very huge.
-            model = DlibResNet()
+        models = {
+            'VGG-Face': VGGFace.loadModel,
+            'OpenFace': OpenFace.loadModel,
+            'Facenet': Facenet.loadModel,
+            'DeepFace': FbDeepFace.loadModel,
+            'DeepID': DeepID.loadModel,
+            'Dlib': DlibResNet_
+        }
+
+        model = models.get(model_name)
+        if model:
+            model = model()
+            print('Using {} model backend and {} distance'.format(model_name, distance_metric))
         else:
-            raise ValueError("Invalid model_name passed - ", model_name)
+            raise ValueError('Invalid model_name passed - {}'.format(model_name))
 
     else: #model != None
         print("Already built model is passed")
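The six-branch if/elif block becomes a lookup table of loader callables: dict.get fetches the loader, it is called only if found, and an unknown name still raises a ValueError. The same shape in miniature, with trivial stand-in loaders:

def load_a(): return "model A"
def load_b(): return "model B"

loaders = {'A': load_a, 'B': load_b}

def build(model_name):
    loader = loaders.get(model_name)
    if loader:
        print('Using {} model backend'.format(model_name))
        return loader()
    raise ValueError('Invalid model_name passed - {}'.format(model_name))

build('A')    # prints the backend message, returns "model A"
# build('C')  # would raise ValueError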
@@ -344,7 +339,6 @@ def verify(img1_path, img2_path = '', model_name ='VGG-Face', distance_metric =
     return resp_obj
-    #return resp_objects
 
 
 def analyze(img_path, actions = [], models = {}, enforce_detection = True, detector_backend = 'opencv'):
 
     if type(img_path) == list:
@@ -513,7 +507,6 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True, detec
     return resp_obj
-    #return resp_objects
 
 
 def detectFace(img_path, detector_backend = 'opencv'):
     img = functions.preprocess_face(img = img_path, detector_backend = detector_backend)[0] #preprocess_face returns (1, 224, 224, 3)
     return img[:, :, ::-1] #bgr to rgb
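detectFace still returns an RGB array: preprocess_face works on BGR internally and the [:, :, ::-1] slice flips the channel order at the end, so the result can go straight into matplotlib. A usage sketch with a placeholder path:

import matplotlib.pyplot as plt
from deepface import DeepFace

face = DeepFace.detectFace("img1.jpg", detector_backend = 'mtcnn')  # (224, 224, 3), RGB
plt.imshow(face)
plt.show()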
deepface/commons/functions.py

@@ -20,6 +20,10 @@ import bz2
 from deepface.commons import distance
 from mtcnn import MTCNN #0.1.0
 
+def load_mtcnn():
+    global mtcnn_detector
+    mtcnn_detector = MTCNN()
+
 def loadBase64Img(uri):
     encoded_data = uri.split(',')[1]
     nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
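load_mtcnn keeps the detector in a module-level global so detect_face and align_face share a single instance instead of rebuilding the network on every call. A lazily initialised variant of the same idea (a sketch, not the deepface API) avoids having to remember to call the loader first:

from mtcnn import MTCNN

_mtcnn_detector = None

def get_mtcnn():
    # build the detector on the first call, return the cached instance afterwards
    global _mtcnn_detector
    if _mtcnn_detector is None:
        _mtcnn_detector = MTCNN()
    return _mtcnn_detector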
@@ -40,55 +44,18 @@ def initializeFolder():
 
 def findThreshold(model_name, distance_metric):
 
-    threshold = 0.40
+    base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}
 
-    if model_name == 'VGG-Face':
-        if distance_metric == 'cosine':
-            threshold = 0.40
-        elif distance_metric == 'euclidean':
-            threshold = 0.55
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.75
-
-    elif model_name == 'OpenFace':
-        if distance_metric == 'cosine':
-            threshold = 0.10
-        elif distance_metric == 'euclidean':
-            threshold = 0.55
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.55
-
-    elif model_name == 'Facenet':
-        if distance_metric == 'cosine':
-            threshold = 0.40
-        elif distance_metric == 'euclidean':
-            threshold = 10
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.80
-
-    elif model_name == 'DeepFace':
-        if distance_metric == 'cosine':
-            threshold = 0.23
-        elif distance_metric == 'euclidean':
-            threshold = 64
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.64
-
-    elif model_name == 'DeepID':
-        if distance_metric == 'cosine':
-            threshold = 0.015
-        elif distance_metric == 'euclidean':
-            threshold = 45
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.17
-
-    elif model_name == 'Dlib':
-        if distance_metric == 'cosine':
-            threshold = 0.07
-        elif distance_metric == 'euclidean':
-            threshold = 0.60
-        elif distance_metric == 'euclidean_l2':
-            threshold = 0.60
+    thresholds = {
+        'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75},
+        'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55},
+        'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80},
+        'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64},
+        'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17},
+        'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.6}
+    }
+
+    threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
 
     return threshold
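The per-model branches collapse into a nested dict: unknown model names now fall back to base_threshold per metric, and unknown metrics to 0.4. The lookup, copied from the new code so the fallbacks can be tried standalone:

base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}

thresholds = {
    'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75},
    'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55},
    'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80},
    'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64},
    'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17},
    'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.6}
}

def findThreshold(model_name, distance_metric):
    return thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)

print(findThreshold('Facenet', 'euclidean_l2'))      # 0.8, straight from the table
print(findThreshold('SomeOtherModel', 'euclidean'))  # 0.55, falls back to base_threshold
print(findThreshold('VGG-Face', 'manhattan'))        # 0.4, unknown metric default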
@@ -277,9 +244,9 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 
     elif detector_backend == 'mtcnn':
 
-        mtcnn_detector = MTCNN()
-
-        detections = mtcnn_detector.detect_faces(img)
+        # mtcnn_detector = MTCNN()
+        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        detections = mtcnn_detector.detect_faces(img_rgb)
 
         if len(detections) > 0:
             detection = detections[0]
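The conversion matters because cv2.imread returns pixels in BGR order while MTCNN was trained on RGB images, so detection on unconverted frames can be noticeably worse; the detector itself now comes from the shared mtcnn_detector built by load_mtcnn. For reference, roughly what code like this does with the first detection, MTCNN's 'box' field being [x, y, width, height] (the cropping step is an illustration, not shown in this hunk; the path is a placeholder):

import cv2
from mtcnn import MTCNN

mtcnn_detector = MTCNN()

img = cv2.imread("face.jpg")                     # BGR, placeholder path
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # MTCNN expects RGB
detections = mtcnn_detector.detect_faces(img_rgb)

if len(detections) > 0:
    x, y, w, h = detections[0]["box"]
    detected_face = img[int(y):int(y + h), int(x):int(x + w)]  # crop from the original BGR frame
    cv2.imwrite("detected_face.jpg", detected_face)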
@@ -288,7 +255,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
             return detected_face
 
         else: #if no face detected
-            if enforce_detection != True:
+            if not enforce_detection:
                 return img
 
             else:
@@ -432,8 +399,9 @@ def align_face(img, detector_backend = 'opencv'):
 
     elif detector_backend == 'mtcnn':
 
-        mtcnn_detector = MTCNN()
-        detections = mtcnn_detector.detect_faces(img)
+        # mtcnn_detector = MTCNN()
+        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        detections = mtcnn_detector.detect_faces(img_rgb)
 
         if len(detections) > 0:
             detection = detections[0]
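align_face gets the same treatment: one shared detector, RGB input. Its alignment logic sits outside this hunk, but MTCNN detections also carry eye keypoints, and a common way to use them (purely an illustration, not a claim about deepface's exact implementation) is to rotate the frame until the eye line is level:

import cv2
import numpy as np
from mtcnn import MTCNN

mtcnn_detector = MTCNN()

img = cv2.imread("face.jpg")                     # placeholder path
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
detections = mtcnn_detector.detect_faces(img_rgb)

if len(detections) > 0:
    keypoints = detections[0]["keypoints"]       # 'left_eye', 'right_eye', 'nose', ...
    (lx, ly) = keypoints["left_eye"]
    (rx, ry) = keypoints["right_eye"]
    angle = np.degrees(np.arctan2(ry - ly, rx - lx))  # tilt of the eye line in degrees
    h, w = img.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    aligned = cv2.warpAffine(img, M, (w, h))     # rotate so the eyes sit on a horizontal line
    cv2.imwrite("aligned.jpg", aligned)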