mirror of https://github.com/serengil/deepface.git

global detector initializer

This commit is contained in:
parent 69acbacf88
commit b663ac641b
@@ -56,8 +56,7 @@ def verify(img1_path, img2_path = '', model_name='VGG-Face', distance_metric='co

     #------------------------------

-    if detector_backend == 'mtcnn':
-        functions.load_mtcnn()
+    functions.initialize_detector(detector_backend = detector_backend)

     resp_objects = []
@@ -355,9 +354,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True, detec

     #---------------------------------

-    #build mtcnn model once
-    if detector_backend == 'mtcnn':
-        functions.load_mtcnn()
+    functions.initialize_detector(detector_backend = detector_backend)

     #---------------------------------
@@ -520,9 +517,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True, detec

 def detectFace(img_path, detector_backend = 'opencv'):

-    #build mtcnn model once
-    if detector_backend == 'mtcnn':
-        functions.load_mtcnn()
+    functions.initialize_detector(detector_backend = detector_backend)

     img = functions.preprocess_face(img = img_path, detector_backend = detector_backend)[0] #preprocess_face returns (1, 224, 224, 3)
     return img[:, :, ::-1] #bgr to rgb
@@ -543,9 +538,7 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',

     #-------------------------------

-    #build mtcnn model once
-    if detector_backend == 'mtcnn':
-        functions.load_mtcnn()
+    functions.initialize_detector(detector_backend = detector_backend)

     #-------------------------------
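
All four entry points above (presumably in deepface/DeepFace.py) now route detector setup through a single call instead of special-casing mtcnn. A minimal sketch of the caller-side pattern, using only names that appear in this diff; the wrapper function name is illustrative:

    from deepface.commons import functions

    def verify_entry_point(img1_path, img2_path, detector_backend = 'opencv'):
        # build whichever detector was requested exactly once; the old code
        # only pre-built the mtcnn detector via functions.load_mtcnn()
        functions.initialize_detector(detector_backend = detector_backend)
        # ... detection, representation and distance computation follow,
        # reusing the globally cached detector for every image
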

@@ -20,10 +20,84 @@ import bz2

 from deepface.commons import distance
 from mtcnn import MTCNN #0.1.0

-def load_mtcnn():
-    global mtcnn_detector
-    mtcnn_detector = MTCNN()
+def initialize_detector(detector_backend):
+
+    global face_detector
+
+    home = str(Path.home())
+
+    if detector_backend == 'opencv':
+
+        opencv_path = get_opencv_path()
+        face_detector_path = opencv_path+"haarcascade_frontalface_default.xml"
+        eye_detector_path = opencv_path+"haarcascade_eye.xml"
+
+        if os.path.isfile(face_detector_path) != True:
+            raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
+
+        face_detector = cv2.CascadeClassifier(face_detector_path)
+
+        global eye_detector
+        eye_detector = cv2.CascadeClassifier(eye_detector_path)
+
+    elif detector_backend == 'ssd':
+
+        #check required ssd model exists in the home/.deepface/weights folder
+
+        #model structure
+        if os.path.isfile(home+'/.deepface/weights/deploy.prototxt') != True:
+
+            print("deploy.prototxt will be downloaded...")
+            url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
+            output = home+'/.deepface/weights/deploy.prototxt'
+            gdown.download(url, output, quiet=False)
+
+        #pre-trained weights
+        if os.path.isfile(home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel') != True:
+
+            print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")
+            url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
+            output = home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel'
+            gdown.download(url, output, quiet=False)
+
+        face_detector = cv2.dnn.readNetFromCaffe(
+            home+"/.deepface/weights/deploy.prototxt",
+            home+"/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
+        )
+
+    elif detector_backend == 'dlib':
+        import dlib #this is not a must library within deepface. that's why, I didn't put this import to a global level. version: 19.20.0
+
+        global sp
+
+        face_detector = dlib.get_frontal_face_detector()
+
+        #check required file exists in the home/.deepface/weights folder
+        if os.path.isfile(home+'/.deepface/weights/shape_predictor_5_face_landmarks.dat') != True:
+
+            print("shape_predictor_5_face_landmarks.dat.bz2 is going to be downloaded")
+
+            url = "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
+            output = home+'/.deepface/weights/'+url.split("/")[-1]
+
+            gdown.download(url, output, quiet=False)
+
+            zipfile = bz2.BZ2File(output)
+            data = zipfile.read()
+            newfilepath = output[:-4] #discard .bz2 extension
+            open(newfilepath, 'wb').write(data)
+
+        sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")
+
+    elif detector_backend == 'mtcnn':
+        face_detector = MTCNN()
+
 def loadBase64Img(uri):
     encoded_data = uri.split(',')[1]
     nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
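
The new initialize_detector (presumably in deepface/commons/functions.py) builds the requested detector once and caches it in a module-level global that detect_face and align_face read later. A self-contained sketch of the same lazy-global pattern, reduced to the opencv backend only; this is an illustration, not the deepface implementation:

    import cv2

    face_detector = None  # module-level cache, analogous to the global introduced in this commit

    def initialize_detector(detector_backend = 'opencv'):
        global face_detector
        if detector_backend == 'opencv':
            # haarcascade file bundled with the opencv-python wheel
            cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
            face_detector = cv2.CascadeClassifier(cascade_path)
        else:
            raise ValueError("only the opencv backend is sketched here")

    def detect_face(img):
        # assumes initialize_detector() has been called first, as the updated entry points do
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return face_detector.detectMultiScale(gray, 1.3, 5)

Building the model once and reusing it avoids re-reading cascade files or re-loading network weights on every call, which is the point of this commit.
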
@@ -97,17 +171,6 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
     home = str(Path.home())

     if detector_backend == 'opencv':

-        #get opencv configuration up first
-
-        opencv_path = get_opencv_path()
-        face_detector_path = opencv_path+"haarcascade_frontalface_default.xml"
-
-        if os.path.isfile(face_detector_path) != True:
-            raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
-
-        face_detector = cv2.CascadeClassifier(face_detector_path)
-
-        #--------------------------
-
         faces = []
@@ -131,39 +194,6 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det

     elif detector_backend == 'ssd':

-        #---------------------------
-        #check required ssd model exists in the home/.deepface/weights folder
-
-        #model structure
-        if os.path.isfile(home+'/.deepface/weights/deploy.prototxt') != True:
-
-            print("deploy.prototxt will be downloaded...")
-            url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
-            output = home+'/.deepface/weights/deploy.prototxt'
-            gdown.download(url, output, quiet=False)
-
-        #pre-trained weights
-        if os.path.isfile(home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel') != True:
-
-            print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")
-            url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
-            output = home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel'
-            gdown.download(url, output, quiet=False)
-
-        #---------------------------
-
-        ssd_detector = cv2.dnn.readNetFromCaffe(
-            home+"/.deepface/weights/deploy.prototxt",
-            home+"/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
-        )
-
         ssd_labels = ["img_id", "is_face", "confidence", "left", "top", "right", "bottom"]

         target_size = (300, 300)
@@ -179,8 +209,8 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det

         imageBlob = cv2.dnn.blobFromImage(image = img)

-        ssd_detector.setInput(imageBlob)
-        detections = ssd_detector.forward()
+        face_detector.setInput(imageBlob)
+        detections = face_detector.forward()

         detections_df = pd.DataFrame(detections[0][0], columns = ssd_labels)
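
For context, the forward pass of OpenCV's res10 SSD face detector returns an array of shape (1, 1, N, 7) whose columns match ssd_labels, with coordinates normalized to [0, 1]. A hedged sketch of turning that output into pixel boxes; the helper name and the 0.90 confidence cut-off are illustrative choices, not necessarily deepface's own values:

    import pandas as pd

    ssd_labels = ["img_id", "is_face", "confidence", "left", "top", "right", "bottom"]

    def ssd_detections_to_boxes(detections, img_w, img_h, min_confidence = 0.90):
        # detections: output of face_detector.forward(), shape (1, 1, N, 7)
        df = pd.DataFrame(detections[0][0], columns = ssd_labels)
        df = df[(df['is_face'] == 1) & (df['confidence'] >= min_confidence)]
        boxes = []
        for _, row in df.iterrows():
            # normalized coordinates are scaled back to the size of the image fed to the network
            boxes.append((int(row['left'] * img_w), int(row['top'] * img_h),
                          int(row['right'] * img_w), int(row['bottom'] * img_h)))
        return boxes
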
@@ -218,11 +248,8 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
             raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")

     elif detector_backend == 'dlib':
-        import dlib #this is not a must library within deepface. that's why, I didn't put this import to a global level. version: 19.20.0
-
-        detector = dlib.get_frontal_face_detector()
-
-        detections = detector(img, 1)
+
+        detections = face_detector(img, 1)

         if len(detections) > 0:
@@ -244,9 +271,8 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det

     elif detector_backend == 'mtcnn':

-        # mtcnn_detector = MTCNN() #this is a global variable now
         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR
-        detections = mtcnn_detector.detect_faces(img_rgb)
+        detections = face_detector.detect_faces(img_rgb)

         if len(detections) > 0:
             detection = detections[0]
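
detect_faces in the mtcnn 0.1.0 package returns a list of dictionaries, which is what the branch above unpacks. A short illustrative continuation showing the fields available on the first detection; the crop line is an assumption about how a caller would use the box, not a quote from this file:

    # detections = face_detector.detect_faces(img_rgb)
    detection = detections[0]
    x, y, w, h = detection['box']           # bounding box in pixels
    confidence = detection['confidence']    # detection score in [0, 1]
    keypoints = detection['keypoints']      # 'left_eye', 'right_eye', 'nose', 'mouth_left', 'mouth_right'
    detected_face = img[int(y):int(y + h), int(x):int(x + w)]
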
@@ -264,8 +290,6 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
     else:
         detectors = ['opencv', 'ssd', 'dlib', 'mtcnn']
         raise ValueError("Valid backends are ", detectors," but you passed ", detector_backend)

-    return 0
-
 def alignment_procedure(img, left_eye, right_eye):
@@ -319,10 +343,6 @@ def align_face(img, detector_backend = 'opencv'):
     home = str(Path.home())

     if (detector_backend == 'opencv') or (detector_backend == 'ssd'):

-        opencv_path = get_opencv_path()
-        eye_detector_path = opencv_path+"haarcascade_eye.xml"
-        eye_detector = cv2.CascadeClassifier(eye_detector_path)
-
         detected_face_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #eye detector expects gray scale image
@@ -364,31 +384,10 @@ def align_face(img, detector_backend = 'opencv'):
         return img #return img anyway

     elif detector_backend == 'dlib':

-        #check required file exists in the home/.deepface/weights folder
-
-        if os.path.isfile(home+'/.deepface/weights/shape_predictor_5_face_landmarks.dat') != True:
-
-            print("shape_predictor_5_face_landmarks.dat.bz2 is going to be downloaded")
-
-            url = "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
-            output = home+'/.deepface/weights/'+url.split("/")[-1]
-
-            gdown.download(url, output, quiet=False)
-
-            zipfile = bz2.BZ2File(output)
-            data = zipfile.read()
-            newfilepath = output[:-4] #discard .bz2 extension
-            open(newfilepath, 'wb').write(data)
-
-        #------------------------------
-
         import dlib #this is not a must dependency in deepface

-        detector = dlib.get_frontal_face_detector()
-        sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")
-
-        detections = detector(img, 1)
+        detections = face_detector(img, 1)

         if len(detections) > 0:
             detected_face = detections[0]
@@ -399,9 +398,8 @@ def align_face(img, detector_backend = 'opencv'):

     elif detector_backend == 'mtcnn':

-        # mtcnn_detector = MTCNN() #this is a global variable now
         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR
-        detections = mtcnn_detector.detect_faces(img_rgb)
+        detections = face_detector.detect_faces(img_rgb)

         if len(detections) > 0:
             detection = detections[0]
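
Alignment in the mtcnn branch relies on the eye keypoints that come with each detection; a hedged sketch of the typical continuation, using alignment_procedure from the same module (the exact lines sit outside this hunk, so treat this as illustration only):

    keypoints = detection['keypoints']
    left_eye = keypoints['left_eye']
    right_eye = keypoints['right_eye']
    # rotate the face so that the line between the eyes becomes horizontal
    img = alignment_procedure(img, left_eye, right_eye)
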

@@ -15,6 +15,26 @@ dataset = [

 print("-----------------------------------------")

+print("Face detectors test")
+
+print("opencv detector")
+res = DeepFace.verify(dataset, detector_backend = 'opencv')
+print(res)
+
+print("ssd detector")
+res = DeepFace.verify(dataset, detector_backend = 'ssd')
+print(res)
+
+print("dlib detector")
+res = DeepFace.verify(dataset, detector_backend = 'dlib')
+print(res)
+
+print("mtcnn detector")
+res = DeepFace.verify(dataset, detector_backend = 'mtcnn')
+print(res)
+
+print("-----------------------------------------")
+
 print("Large scale face recognition")

 df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset"
@@ -231,4 +251,4 @@ for backend in backends:
     print("Backend ", backend, " is done in ", toc-tic," seconds")

 #-----------------------------------
 print("--------------------------")
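
The timing loop at the end of the test script measures each backend end to end; a minimal standalone sketch of the same benchmark, assuming a second image path dataset/img2.jpg that does not appear in this diff:

    import time
    from deepface import DeepFace

    backends = ['opencv', 'ssd', 'dlib', 'mtcnn']

    for backend in backends:
        tic = time.time()
        # dataset/img1.jpg appears in the diff; dataset/img2.jpg is an assumed second image
        DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", detector_backend = backend)
        toc = time.time()
        print("Backend ", backend, " is done in ", toc - tic, " seconds")
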