From d11f08958e556742b2b35a6446ee883986e61e72 Mon Sep 17 00:00:00 2001
From: Sefik Ilkin Serengil
Date: Thu, 29 Apr 2021 21:00:25 +0300
Subject: [PATCH] global eye detector for opencv and ssd

---
 deepface/detectors/DlibWrapper.py   | 12 +++++++++---
 deepface/detectors/OpenCvWrapper.py | 25 ++++++++++++++++---------
 deepface/detectors/SsdWrapper.py    | 13 ++++++++++---
 tests/unit_tests.py                 | 25 +++++++++++++------------
 4 files changed, 48 insertions(+), 27 deletions(-)

diff --git a/deepface/detectors/DlibWrapper.py b/deepface/detectors/DlibWrapper.py
index a23612b..7514866 100644
--- a/deepface/detectors/DlibWrapper.py
+++ b/deepface/detectors/DlibWrapper.py
@@ -25,19 +25,25 @@ def build_model():
 	open(newfilepath, 'wb').write(data)
 
 	face_detector = dlib.get_frontal_face_detector()
-	return face_detector
+	sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")
 
-def detect_face(face_detector, img):
+	detector = {}
+	detector["face_detector"] = face_detector
+	detector["sp"] = sp
+	return detector
+
+def detect_face(detector, img):
 
 	import dlib #this requirement is not a must that's why imported here
 
 	home = str(Path.home())
 
-	sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")
+	sp = detector["sp"]
 
 	detected_face = None
 	img_region = [0, 0, img.shape[0], img.shape[1]]
 
+	face_detector = detector["face_detector"]
 	detections = face_detector(img, 1)
 
 	if len(detections) > 0:
diff --git a/deepface/detectors/OpenCvWrapper.py b/deepface/detectors/OpenCvWrapper.py
index a759030..d361685 100644
--- a/deepface/detectors/OpenCvWrapper.py
+++ b/deepface/detectors/OpenCvWrapper.py
@@ -3,7 +3,16 @@ import os
 import pandas as pd
 from deepface.detectors import FaceDetector
 
-def build_model(model_name = 'haarcascade'):
+def build_model():
+
+	detector ={}
+
+	detector["face_detector"] = build_cascade('haarcascade')
+	detector["eye_detector"] = build_cascade('haarcascade_eye')
+
+	return detector
+
+def build_cascade(model_name = 'haarcascade'):
 
 	opencv_path = get_opencv_path()
 
 	if model_name == 'haarcascade':
@@ -16,6 +25,7 @@
 		face_detector = cv2.CascadeClassifier(face_detector_path)
 
 		return face_detector
+
 	elif model_name == 'haarcascade_eye':
 		eye_detector_path = opencv_path+"haarcascade_eye.xml"
 
@@ -25,15 +35,14 @@
 		eye_detector = cv2.CascadeClassifier(eye_detector_path)
 
 		return eye_detector
-
-def detect_face(face_detector, img):
+def detect_face(detector, img):
 
 	detected_face = None
 	img_region = [0, 0, img.shape[0], img.shape[1]]
 
 	faces = []
 	try:
-		faces = face_detector.detectMultiScale(img, 1.3, 5)
+		faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
 	except:
 		pass
 
@@ -41,15 +50,13 @@
 		x,y,w,h = faces[0] #focus on the 1st face found in the image
 		detected_face = img[int(y):int(y+h), int(x):int(x+w)]
 
-		detected_face = align_face(detected_face)
+		detected_face = align_face(detector["eye_detector"], detected_face)
 
 		img_region = [x, y, w, h]
 
 	return detected_face, img_region
 
-def align_face(img):
-
-	eye_detector = build_model(model_name = 'haarcascade_eye')
-
+def align_face(eye_detector, img):
+
 	detected_face_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #eye detector expects gray scale image
 
 	eyes = eye_detector.detectMultiScale(detected_face_gray)
diff --git a/deepface/detectors/SsdWrapper.py b/deepface/detectors/SsdWrapper.py
index 17a0c14..f574945 100644
--- a/deepface/detectors/SsdWrapper.py
+++ b/deepface/detectors/SsdWrapper.py
@@ -37,9 +37,15 @@
 		home+"/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
 	)
 
-	return face_detector
+	eye_detector = OpenCvWrapper.build_cascade("haarcascade_eye")
 
-def detect_face(face_detector, img):
+	detector = {}
+	detector["face_detector"] = face_detector
+	detector["eye_detector"] = eye_detector
+
+	return detector
+
+def detect_face(detector, img):
 
 	detected_face = None
 	img_region = [0, 0, img.shape[0], img.shape[1]]
@@ -59,6 +65,7 @@
 
 	imageBlob = cv2.dnn.blobFromImage(image = img)
 
+	face_detector = detector["face_detector"]
 	face_detector.setInput(imageBlob)
 	detections = face_detector.forward()
 
@@ -87,6 +94,6 @@
 			detected_face = base_img[int(top*aspect_ratio_y):int(bottom*aspect_ratio_y), int(left*aspect_ratio_x):int(right*aspect_ratio_x)]
 			img_region = [int(left*aspect_ratio_x), int(top*aspect_ratio_y), int(right*aspect_ratio_x) - int(left*aspect_ratio_x), int(bottom*aspect_ratio_y) - int(top*aspect_ratio_y)]
 
-			detected_face = OpenCvWrapper.align_face(detected_face)
+			detected_face = OpenCvWrapper.align_face(detector["eye_detector"], detected_face)
 
 	return detected_face, img_region
diff --git a/tests/unit_tests.py b/tests/unit_tests.py
index 951d235..20bfdd3 100644
--- a/tests/unit_tests.py
+++ b/tests/unit_tests.py
@@ -26,6 +26,19 @@ from deepface.extendedmodels import Age, Gender, Race, Emotion
 
 #-----------------------------------------
 
+if False:
+	print("Detector tests")
+	import matplotlib.pyplot as plt
+	detectors = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
+	for detector in detectors:
+		img = DeepFace.detectFace("dataset/img11.jpg", detector_backend = detector)
+		plt.imshow(img)
+		plt.show()
+
+#-----------------------------------------
+print("-----------------------------------------")
+
+
 img_path = "dataset/img1.jpg"
 embedding = DeepFace.represent(img_path)
 print("Function returned ", len(embedding), "dimensional vector")
@@ -45,18 +58,6 @@ dataset = [
 
 print("-----------------------------------------")
 
-if False:
-	print("Detector tests")
-	import matplotlib.pyplot as plt
-	detectors = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
-	#detectors = ['retinaface']
-	for detector in detectors:
-		img = DeepFace.detectFace("dataset/img1.jpg", detector_backend = detector)
-		plt.imshow(img)
-		plt.show()
-
-print("-----------------------------------------")
-
 print("Face detectors test")
 
 print("retinaface detector")
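
For reference, a minimal usage sketch of the detector interface introduced by this patch, assuming the patched deepface package is importable; the image path is illustrative and borrowed from tests/unit_tests.py. build_model() now returns a dict bundling the face and eye cascades, so the eye detector is loaded once instead of being rebuilt inside align_face() on every call.

import cv2
from deepface.detectors import OpenCvWrapper

# build_model() returns {"face_detector": <frontal face cascade>, "eye_detector": <eye cascade>}
detector = OpenCvWrapper.build_model()

# illustrative input image (path taken from the unit tests)
img = cv2.imread("dataset/img1.jpg")

# detect_face() consumes the whole dict, aligns the crop with the pre-built
# eye detector, and returns the face plus its [x, y, w, h] region
detected_face, img_region = OpenCvWrapper.detect_face(detector, img)
print(img_region)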