diff --git a/README.md b/README.md
index a38a5ac..7056d7d 100644
--- a/README.md
+++ b/README.md
@@ -193,7 +193,7 @@ Age model got ± 4.65 MAE; gender model got 97.44% accuracy, 96.29% precision an
**Face Detectors** - [`Demo`](https://youtu.be/GZ2p2hj2H5k)
-Face detection and alignment are important early stages of a modern face recognition pipeline. Experiments show that just alignment increases the face recognition accuracy almost 1%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`SSD`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MTCNN`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/), [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/), [`YOLOv8 Face`](https://github.com/derronqi/yolov8-face) and [`YuNet`](https://github.com/ShiqiYu/libfacedetection) detectors are wrapped in deepface.
+Face detection and alignment are important early stages of a modern face recognition pipeline. Experiments show that alignment alone increases face recognition accuracy by almost 1%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`SSD`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MTCNN`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), [`Faster MTCNN`](https://github.com/timesler/facenet-pytorch), [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/), [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/), [`YOLOv8 Face`](https://github.com/derronqi/yolov8-face) and [`YuNet`](https://github.com/ShiqiYu/libfacedetection) detectors are wrapped in deepface.

@@ -209,6 +209,7 @@ backends = [
'mediapipe',
'yolov8',
'yunet',
+ 'fastmtcnn',
]
#face verification
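For reviewers, a minimal usage sketch of the new backend through the public API described in the README above; the image file names are placeholders, and the snippet only assumes the `detector_backend` argument already shown in the existing docs.

```python
from deepface import DeepFace

# hypothetical sample images; any two local face photos would do
result = DeepFace.verify(
    img1_path="img1.jpg",
    img2_path="img2.jpg",
    detector_backend="fastmtcnn",  # new backend added by this PR
)
print(result["verified"])
```

The same backend string should also work with the other entry points that accept `detector_backend`, e.g. `DeepFace.analyze` and `DeepFace.find`.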
diff --git a/deepface/detectors/FaceDetector.py b/deepface/detectors/FaceDetector.py
index 522592d..cd91312 100644
--- a/deepface/detectors/FaceDetector.py
+++ b/deepface/detectors/FaceDetector.py
@@ -11,6 +11,7 @@ from deepface.detectors import (
MediapipeWrapper,
YoloWrapper,
YunetWrapper,
+ FastMtcnnWrapper,
)
@@ -26,6 +27,7 @@ def build_model(detector_backend):
"mediapipe": MediapipeWrapper.build_model,
"yolov8": YoloWrapper.build_model,
"yunet": YunetWrapper.build_model,
+ "fastmtcnn": FastMtcnnWrapper.build_model,
}
if not "face_detector_obj" in globals():
@@ -70,6 +72,7 @@ def detect_faces(face_detector, detector_backend, img, align=True):
"mediapipe": MediapipeWrapper.detect_face,
"yolov8": YoloWrapper.detect_face,
"yunet": YunetWrapper.detect_face,
+ "fastmtcnn": FastMtcnnWrapper.detect_face,
}
detect_face_fn = backends.get(detector_backend)
diff --git a/deepface/detectors/FastMtcnnWrapper.py b/deepface/detectors/FastMtcnnWrapper.py
new file mode 100644
index 0000000..20461a3
--- /dev/null
+++ b/deepface/detectors/FastMtcnnWrapper.py
@@ -0,0 +1,57 @@
+import cv2
+from deepface.detectors import FaceDetector
+
+# Link -> https://github.com/timesler/facenet-pytorch
+# Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch
+
+def build_model():
+    # facenet-pytorch is an optional dependency, raise a clear error if it is missing
+    try:
+        from facenet_pytorch import MTCNN as fast_mtcnn
+    except ModuleNotFoundError as e:
+        raise ImportError("FastMtcnn is an optional detector, ensure the library is installed. "
+                          "Please install using 'pip install facenet-pytorch'") from e
+
+    face_detector = fast_mtcnn(
+        image_size=160,
+        thresholds=[0.6, 0.7, 0.7],  # MTCNN thresholds
+        post_process=True,
+        device='cpu',
+        select_largest=False,  # return results in descending order by detection probability
+    )
+    return face_detector
+
+def xyxy_to_xywh(xyxy):
+    """
+    Convert xyxy format to xywh format.
+    """
+    x, y = xyxy[0], xyxy[1]
+    w = xyxy[2] - x + 1
+    h = xyxy[3] - y + 1
+    return [x, y, w, h]
+
+def detect_face(face_detector, img, align=True):
+
+    resp = []
+
+    detected_face = None
+    img_region = [0, 0, img.shape[1], img.shape[0]]
+
+    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mtcnn expects RGB but OpenCV reads BGR
+    detections = face_detector.detect(img_rgb, landmarks=True)  # returns bounding boxes, probabilities, landmarks
+
+    if detections[0] is not None:  # boxes are None when no face is detected
+        for detection in zip(*detections):
+            x, y, w, h = xyxy_to_xywh(detection[0])
+            detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
+            img_region = [x, y, w, h]
+            confidence = detection[1]
+
+            if align:
+                left_eye = detection[2][0]
+                right_eye = detection[2][1]
+                detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
+
+            resp.append((detected_face, img_region, confidence))
+
+    return resp
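For completeness, a rough sketch of exercising the wrapper directly through the `FaceDetector` dispatch extended above; it assumes `FaceDetector.detect_faces` forwards to the wrapper's `detect_face` and returns its `(detected_face, region, confidence)` tuples, and the image path is a placeholder.

```python
import cv2
from deepface.detectors import FaceDetector

# build the fastmtcnn detector via the dispatch table added in FaceDetector.build_model
face_detector = FaceDetector.build_model("fastmtcnn")

img = cv2.imread("img1.jpg")  # hypothetical sample image, read as BGR by OpenCV

# expected to yield one (detected_face, [x, y, w, h], confidence) tuple per detected face
faces = FaceDetector.detect_faces(face_detector, "fastmtcnn", img, align=True)
print(len(faces))
```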
diff --git a/requirements_additional.txt b/requirements_additional.txt
index 334c4f9..0344661 100644
--- a/requirements_additional.txt
+++ b/requirements_additional.txt
@@ -1,4 +1,5 @@
opencv-contrib-python>=4.3.0.36
mediapipe>=0.8.7.3
dlib>=19.20.0
-ultralytics>=8.0.122
\ No newline at end of file
+ultralytics>=8.0.122
+facenet-pytorch>=2.5.3
\ No newline at end of file