diff --git a/README.md b/README.md
index 90e31f1..1467806 100644
--- a/README.md
+++ b/README.md
@@ -304,6 +304,27 @@ user
│ │ ├── Bob.jpg
```
+**Face Anti Spoofing** - `Demo`
+
+DeepFace also includes an anti-spoofing analysis module to determine whether a given image is real or fake. To activate this feature, set the `anti_spoofing` argument to True in any DeepFace task.
+
+
+```python
+# anti-spoofing test in face detection
+face_objs = DeepFace.extract_faces(
+ img_path="dataset/img1.jpg",
+ anti_spoofing=True
+)
+assert face_objs[0]["is_real"] is True
+
+# anti-spoofing test in real time analysis
+DeepFace.stream(
+ db_path="C:/User/Sefik/Desktop/database",
+ anti_spoofing=True
+)
+```
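+
+When a spoof is detected, tasks such as `analyze`, `verify` and `find` raise a `ValueError` instead of returning a result, so you may want to handle that case explicitly. A minimal sketch:
+
+```python
+# handling a detected spoof
+try:
+    DeepFace.analyze(img_path="dataset/img1.jpg", anti_spoofing=True)
+except ValueError as err:
+    print(f"skipped spoofed input: {err}")
+```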
+
**API** - [`Demo`](https://youtu.be/HeKCQ6U9XmI)
DeepFace serves an API as well - see [`api folder`](https://github.com/serengil/deepface/tree/master/deepface/api/src) for more details. You can clone deepface source code and run the api with the following command. It will use gunicorn server to get a rest service up. In this way, you can call deepface from an external system such as mobile app or web.
@@ -418,7 +439,6 @@ Also, if you use deepface in your GitHub projects, please add `deepface` in the
DeepFace is licensed under the MIT License - see [`LICENSE`](https://github.com/serengil/deepface/blob/master/LICENSE) for more details.
-DeepFace wraps some external face recognition models: [VGG-Face](http://www.robots.ox.ac.uk/~vgg/software/vgg_face/), [Facenet](https://github.com/davidsandberg/facenet/blob/master/LICENSE.md) (both 128d and 512d), [OpenFace](https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/LICENSE), [DeepFace](https://github.com/swghosh/DeepFace), [DeepID](https://github.com/Ruoyiran/DeepID/blob/master/LICENSE.md), [ArcFace](https://github.com/leondgarse/Keras_insightface/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/dlib/LICENSE.txt), [SFace](https://github.com/opencv/opencv_zoo/blob/master/models/face_recognition_sface/LICENSE) and [GhostFaceNet](https://github.com/HamadYA/GhostFaceNets/blob/main/LICENSE). Besides, age, gender and race / ethnicity models were trained on the backbone of VGG-Face with transfer learning. Similarly, DeepFace wraps many face detectors: [OpenCv](https://github.com/opencv/opencv/blob/4.x/LICENSE), [Ssd](https://github.com/opencv/opencv/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/LICENSE.txt), [MtCnn](https://github.com/ipazc/mtcnn/blob/master/LICENSE), [Fast MtCnn](https://github.com/timesler/facenet-pytorch/blob/master/LICENSE.md), [RetinaFace](https://github.com/serengil/retinaface/blob/master/LICENSE), [MediaPipe](https://github.com/google/mediapipe/blob/master/LICENSE), [YuNet](https://github.com/ShiqiYu/libfacedetection/blob/master/LICENSE), [Yolo](https://github.com/derronqi/yolov8-face/blob/main/LICENSE) and [CenterFace](https://github.com/Star-Clouds/CenterFace/blob/master/LICENSE). License types will be inherited when you intend to utilize those models. Please check the license types of those models for production purposes.
-
+DeepFace wraps some external face recognition models: [VGG-Face](http://www.robots.ox.ac.uk/~vgg/software/vgg_face/), [Facenet](https://github.com/davidsandberg/facenet/blob/master/LICENSE.md) (both 128d and 512d), [OpenFace](https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/LICENSE), [DeepFace](https://github.com/swghosh/DeepFace), [DeepID](https://github.com/Ruoyiran/DeepID/blob/master/LICENSE.md), [ArcFace](https://github.com/leondgarse/Keras_insightface/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/dlib/LICENSE.txt), [SFace](https://github.com/opencv/opencv_zoo/blob/master/models/face_recognition_sface/LICENSE) and [GhostFaceNet](https://github.com/HamadYA/GhostFaceNets/blob/main/LICENSE). Besides, age, gender and race / ethnicity models were trained on the backbone of VGG-Face with transfer learning. Similarly, DeepFace wraps many face detectors: [OpenCv](https://github.com/opencv/opencv/blob/4.x/LICENSE), [Ssd](https://github.com/opencv/opencv/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/LICENSE.txt), [MtCnn](https://github.com/ipazc/mtcnn/blob/master/LICENSE), [Fast MtCnn](https://github.com/timesler/facenet-pytorch/blob/master/LICENSE.md), [RetinaFace](https://github.com/serengil/retinaface/blob/master/LICENSE), [MediaPipe](https://github.com/google/mediapipe/blob/master/LICENSE), [YuNet](https://github.com/ShiqiYu/libfacedetection/blob/master/LICENSE), [Yolo](https://github.com/derronqi/yolov8-face/blob/main/LICENSE) and [CenterFace](https://github.com/Star-Clouds/CenterFace/blob/master/LICENSE). Finally, DeepFace optionally uses [face anti spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/LICENSE) to determine whether given images are real or fake. License types will be inherited when you intend to utilize those models. Please check the license types of those models for production purposes.
DeepFace [logo](https://thenounproject.com/term/face-recognition/2965879/) is created by [Adrien Coquet](https://thenounproject.com/coquet_adrien/) and it is licensed under [Creative Commons: By Attribution 3.0 License](https://creativecommons.org/licenses/by/3.0/).
diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index 159dcb4..0ac2be5 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -73,6 +73,7 @@ def verify(
normalization: str = "base",
silent: bool = False,
threshold: Optional[float] = None,
+ anti_spoofing: bool = False,
) -> Dict[str, Any]:
"""
Verify if an image pair represents the same person or different persons.
@@ -113,6 +114,8 @@ def verify(
If left unset, default pre-tuned threshold values will be applied based on the specified
model name and distance metric (default is None).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
result (dict): A dictionary containing verification results with following keys.
@@ -150,6 +153,7 @@ def verify(
normalization=normalization,
silent=silent,
threshold=threshold,
+ anti_spoofing=anti_spoofing,
)
@@ -161,6 +165,7 @@ def analyze(
align: bool = True,
expand_percentage: int = 0,
silent: bool = False,
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Analyze facial attributes such as age, gender, emotion, and race in the provided image.
@@ -189,6 +194,8 @@ def analyze(
silent (boolean): Suppress or allow some log messages for a quieter analysis process
(default is False).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
the analysis results for a detected face. Each dictionary in the list contains the
@@ -245,6 +252,7 @@ def analyze(
align=align,
expand_percentage=expand_percentage,
silent=silent,
+ anti_spoofing=anti_spoofing,
)
@@ -261,6 +269,7 @@ def find(
normalization: str = "base",
silent: bool = False,
refresh_database: bool = True,
+ anti_spoofing: bool = False,
) -> List[pd.DataFrame]:
"""
Identify individuals in a database
@@ -301,8 +310,10 @@ def find(
(default is False).
refresh_database (boolean): Synchronizes the images representation (pkl) file with the
- directory/db files, if set to false, it will ignore any file changes inside the db_path
- (default is True).
+ directory/db files. If set to false, it will ignore any file changes inside the db_path
+ (default is True).
+
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
Returns:
results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
@@ -335,6 +346,7 @@ def find(
normalization=normalization,
silent=silent,
refresh_database=refresh_database,
+ anti_spoofing=anti_spoofing,
)
@@ -346,6 +358,7 @@ def represent(
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Represent facial images as multi-dimensional vector embeddings.
@@ -375,6 +388,8 @@ def represent(
Default is base. Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
(default is base).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, each containing the
following fields:
@@ -399,6 +414,7 @@ def represent(
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
+ anti_spoofing=anti_spoofing,
)
@@ -411,6 +427,7 @@ def stream(
source: Any = 0,
time_threshold: int = 5,
frame_threshold: int = 5,
+ anti_spoofing: bool = False,
) -> None:
"""
Run real time face recognition and facial attribute analysis
@@ -437,6 +454,8 @@ def stream(
time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
frame_threshold (int): The frame threshold for face recognition (default is 5).
+
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
Returns:
None
"""
@@ -453,6 +472,7 @@ def stream(
source=source,
time_threshold=time_threshold,
frame_threshold=frame_threshold,
+ anti_spoofing=anti_spoofing,
)
@@ -463,6 +483,7 @@ def extract_faces(
align: bool = True,
expand_percentage: int = 0,
grayscale: bool = False,
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Extract faces from a given image
@@ -485,6 +506,8 @@ def extract_faces(
grayscale (boolean): Flag to convert the image to grayscale before
processing (default is False).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
@@ -497,6 +520,12 @@ def extract_faces(
instead of observer.
- "confidence" (float): The confidence score associated with the detected face.
+
+ - "is_real" (boolean): anti-spoofing analysis result. This key is only available
+ if anti_spoofing is set to True in the input arguments.
+
+ - "antispoof_score" (float): score of the anti-spoofing analysis. This key is only
+ available if anti_spoofing is set to True in the input arguments.
"""
return detection.extract_faces(
@@ -506,6 +535,7 @@ def extract_faces(
align=align,
expand_percentage=expand_percentage,
grayscale=grayscale,
+ anti_spoofing=anti_spoofing,
)
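
As a quick illustration of the return shape documented above, a hedged sketch reading the two new keys (the image path is hypothetical):

```python
from deepface import DeepFace

face_objs = DeepFace.extract_faces(img_path="dataset/img1.jpg", anti_spoofing=True)
for face_obj in face_objs:
    # "is_real" and "antispoof_score" are present only because anti_spoofing=True
    print(face_obj["facial_area"], face_obj["confidence"])
    print(face_obj["is_real"], face_obj["antispoof_score"])
```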
diff --git a/deepface/api/src/modules/core/routes.py b/deepface/api/src/modules/core/routes.py
index c7254fc..98f624a 100644
--- a/deepface/api/src/modules/core/routes.py
+++ b/deepface/api/src/modules/core/routes.py
@@ -24,17 +24,13 @@ def represent():
if img_path is None:
return {"message": "you must pass img_path input"}
- model_name = input_args.get("model_name", "VGG-Face")
- detector_backend = input_args.get("detector_backend", "opencv")
- enforce_detection = input_args.get("enforce_detection", True)
- align = input_args.get("align", True)
-
obj = service.represent(
img_path=img_path,
- model_name=model_name,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
+ model_name=input_args.get("model_name", "VGG-Face"),
+ detector_backend=input_args.get("detector_backend", "opencv"),
+ enforce_detection=input_args.get("enforce_detection", True),
+ align=input_args.get("align", True),
+ anti_spoofing=input_args.get("anti_spoofing", False),
)
logger.debug(obj)
@@ -58,20 +54,15 @@ def verify():
if img2_path is None:
return {"message": "you must pass img2_path input"}
- model_name = input_args.get("model_name", "VGG-Face")
- detector_backend = input_args.get("detector_backend", "opencv")
- enforce_detection = input_args.get("enforce_detection", True)
- distance_metric = input_args.get("distance_metric", "cosine")
- align = input_args.get("align", True)
-
verification = service.verify(
img1_path=img1_path,
img2_path=img2_path,
- model_name=model_name,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- align=align,
- enforce_detection=enforce_detection,
+ model_name=input_args.get("model_name", "VGG-Face"),
+ detector_backend=input_args.get("detector_backend", "opencv"),
+ distance_metric=input_args.get("distance_metric", "cosine"),
+ align=input_args.get("align", True),
+ enforce_detection=input_args.get("enforce_detection", True),
+ anti_spoofing=input_args.get("anti_spoofing", False),
)
logger.debug(verification)
@@ -90,17 +81,13 @@ def analyze():
if img_path is None:
return {"message": "you must pass img_path input"}
- detector_backend = input_args.get("detector_backend", "opencv")
- enforce_detection = input_args.get("enforce_detection", True)
- align = input_args.get("align", True)
- actions = input_args.get("actions", ["age", "gender", "emotion", "race"])
-
demographies = service.analyze(
img_path=img_path,
- actions=actions,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
+ actions=input_args.get("actions", ["age", "gender", "emotion", "race"]),
+ detector_backend=input_args.get("detector_backend", "opencv"),
+ enforce_detection=input_args.get("enforce_detection", True),
+ align=input_args.get("align", True),
+ anti_spoofing=input_args.get("anti_spoofing", False),
)
logger.debug(demographies)
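
Since the routes now forward `anti_spoofing` straight from the request payload, a client can toggle it per call. A hedged sketch against a locally running API (host, port and the `/analyze` route assume the default setup):

```python
import requests

resp = requests.post(
    "http://localhost:5000/analyze",
    json={"img_path": "dataset/img1.jpg", "anti_spoofing": True},
)
# the service layer converts a spoof detection into an error payload with HTTP 400
print(resp.status_code, resp.json())
```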
diff --git a/deepface/api/src/modules/core/service.py b/deepface/api/src/modules/core/service.py
index 6ba3c69..ec3b6b1 100644
--- a/deepface/api/src/modules/core/service.py
+++ b/deepface/api/src/modules/core/service.py
@@ -3,7 +3,14 @@ from deepface import DeepFace
# pylint: disable=broad-except
-def represent(img_path, model_name, detector_backend, enforce_detection, align):
+def represent(
+ img_path: str,
+ model_name: str,
+ detector_backend: str,
+ enforce_detection: bool,
+ align: bool,
+ anti_spoofing: bool,
+):
try:
result = {}
embedding_objs = DeepFace.represent(
@@ -12,6 +19,7 @@ def represent(img_path, model_name, detector_backend, enforce_detection, align):
detector_backend=detector_backend,
enforce_detection=enforce_detection,
align=align,
+ anti_spoofing=anti_spoofing,
)
result["results"] = embedding_objs
return result
@@ -20,7 +28,14 @@ def represent(img_path, model_name, detector_backend, enforce_detection, align):
def verify(
- img1_path, img2_path, model_name, detector_backend, distance_metric, enforce_detection, align
+ img1_path: str,
+ img2_path: str,
+ model_name: str,
+ detector_backend: str,
+ distance_metric: str,
+ enforce_detection: bool,
+ align: bool,
+ anti_spoofing: bool,
):
try:
obj = DeepFace.verify(
@@ -31,13 +46,21 @@ def verify(
distance_metric=distance_metric,
align=align,
enforce_detection=enforce_detection,
+ anti_spoofing=anti_spoofing,
)
return obj
except Exception as err:
return {"error": f"Exception while verifying: {str(err)}"}, 400
-def analyze(img_path, actions, detector_backend, enforce_detection, align):
+def analyze(
+ img_path: str,
+ actions: list,
+ detector_backend: str,
+ enforce_detection: bool,
+ align: bool,
+ anti_spoofing: bool,
+):
try:
result = {}
demographies = DeepFace.analyze(
@@ -47,6 +70,7 @@ def analyze(img_path, actions, detector_backend, enforce_detection, align):
enforce_detection=enforce_detection,
align=align,
silent=True,
+ anti_spoofing=anti_spoofing,
)
result["results"] = demographies
return result
diff --git a/deepface/commons/file_utils.py b/deepface/commons/file_utils.py
new file mode 100644
index 0000000..b5b0169
--- /dev/null
+++ b/deepface/commons/file_utils.py
@@ -0,0 +1,31 @@
+# built-in dependencies
+import os
+
+# 3rd party dependencies
+import gdown
+
+# project dependencies
+from deepface.commons import logger as log
+
+logger = log.get_singletonish_logger()
+
+
+def download_external_file(file_name: str, exact_file_path: str, url: str) -> None:
+ """
+ Download an external file
+ Args:
+ file_name (str): file name with extension
+ exact_file_path (str): exact location of the file with file name
+ url (str): url to download the file from
+ Returns:
+ None
+ """
+ if os.path.exists(exact_file_path) is False:
+ logger.info(f"Downloading {file_name} to {exact_file_path}")
+ try:
+ gdown.download(url, exact_file_path, quiet=False)
+ except Exception as err:
+ raise ValueError(
+ f"Exception while downloading {file_name} from {url} to {exact_file_path}. "
+ "You may consider downloading it manually and copying it to the target destination."
+ ) from err
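
For reference, `FasNet.py` below calls this helper as follows; the `os.path.exists` guard makes the call idempotent, so it is safe to run on every model build:

```python
from deepface.commons import folder_utils, file_utils

home = folder_utils.get_deepface_home()

# downloads only if the weight file is not already present under ~/.deepface/weights
file_utils.download_external_file(
    file_name="2.7_80x80_MiniFASNetV2.pth",
    exact_file_path=f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth",
    url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
)
```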
diff --git a/deepface/modules/demography.py b/deepface/modules/demography.py
index f11f71d..3cc3ebc 100644
--- a/deepface/modules/demography.py
+++ b/deepface/modules/demography.py
@@ -18,6 +18,7 @@ def analyze(
align: bool = True,
expand_percentage: int = 0,
silent: bool = False,
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Analyze facial attributes such as age, gender, emotion, and race in the provided image.
@@ -47,6 +48,8 @@ def analyze(
silent (boolean): Suppress or allow some log messages for a quieter analysis process
(default is False).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
the analysis results for a detected face.
@@ -124,9 +127,13 @@ def analyze(
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
+ anti_spoofing=anti_spoofing,
)
for img_obj in img_objs:
+ if anti_spoofing is True and img_obj.get("is_real", True) is False:
+ raise ValueError("Spoof detected in the given image.")
+
img_content = img_obj["face"]
img_region = img_obj["facial_area"]
img_confidence = img_obj["confidence"]
diff --git a/deepface/modules/detection.py b/deepface/modules/detection.py
index ad4b288..98bfee7 100644
--- a/deepface/modules/detection.py
+++ b/deepface/modules/detection.py
@@ -7,6 +7,7 @@ import cv2
from PIL import Image
# project dependencies
+from deepface.modules import modeling
from deepface.models.Detector import DetectedFace, FacialAreaRegion
from deepface.detectors import DetectorWrapper
from deepface.commons import image_utils
@@ -24,6 +25,7 @@ def extract_faces(
align: bool = True,
expand_percentage: int = 0,
grayscale: bool = False,
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Extract faces from a given image
@@ -46,6 +48,8 @@ def extract_faces(
grayscale (boolean): Flag to convert the image to grayscale before
processing (default is False).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
@@ -58,6 +62,12 @@ def extract_faces(
to the person itself instead of observer.
- "confidence" (float): The confidence score associated with the detected face.
+
+ - "is_real" (boolean): anti-spoofing analysis result. This key is only available
+ if anti_spoofing is set to True in the input arguments.
+
+ - "antispoof_score" (float): score of the anti-spoofing analysis. This key is only
+ available if anti_spoofing is set to True in the input arguments.
"""
resp_objs = []
@@ -109,20 +119,31 @@ def extract_faces(
current_img = current_img / 255 # normalize input in [0, 1]
- resp_objs.append(
- {
- "face": current_img[:, :, ::-1],
- "facial_area": {
- "x": int(current_region.x),
- "y": int(current_region.y),
- "w": int(current_region.w),
- "h": int(current_region.h),
- "left_eye": current_region.left_eye,
- "right_eye": current_region.right_eye,
- },
- "confidence": round(current_region.confidence, 2),
- }
- )
+ x = int(current_region.x)
+ y = int(current_region.y)
+ w = int(current_region.w)
+ h = int(current_region.h)
+
+ resp_obj = {
+ "face": current_img[:, :, ::-1],
+ "facial_area": {
+ "x": x,
+ "y": y,
+ "w": w,
+ "h": h,
+ "left_eye": current_region.left_eye,
+ "right_eye": current_region.right_eye,
+ },
+ "confidence": round(current_region.confidence, 2),
+ }
+
+ if anti_spoofing is True:
+ antispoof_model = modeling.build_model(model_name="Fasnet")
+ is_real, antispoof_score = antispoof_model.analyze(img=img, facial_area=(x, y, w, h))
+ resp_obj["is_real"] = is_real
+ resp_obj["antispoof_score"] = antispoof_score
+
+ resp_objs.append(resp_obj)
if len(resp_objs) == 0 and enforce_detection == True:
raise ValueError(
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py
index b40dcb5..60b6a71 100644
--- a/deepface/modules/modeling.py
+++ b/deepface/modules/modeling.py
@@ -11,9 +11,10 @@ from deepface.basemodels import (
SFace,
Dlib,
Facenet,
- GhostFaceNet
+ GhostFaceNet,
)
from deepface.extendedmodels import Age, Gender, Race, Emotion
+from deepface.spoofmodels import FasNet
def build_model(model_name: str) -> Any:
@@ -46,6 +47,7 @@ def build_model(model_name: str) -> Any:
"Age": Age.ApparentAgeClient,
"Gender": Gender.GenderClient,
"Race": Race.RaceClient,
+ "Fasnet": FasNet.Fasnet,
}
if not "model_obj" in globals():
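
With "Fasnet" registered here, the anti-spoofing model goes through the same cached factory as the recognition models. A hedged sketch of direct use (the image path and facial area coordinates are hypothetical; `detection.extract_faces` passes the raw BGR image and the detected box in the same way):

```python
import cv2
from deepface.modules import modeling

antispoof_model = modeling.build_model(model_name="Fasnet")  # built once, then reused

img = cv2.imread("dataset/img1.jpg")  # BGR ndarray
is_real, antispoof_score = antispoof_model.analyze(img=img, facial_area=(100, 120, 160, 160))
print(is_real, antispoof_score)
```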
diff --git a/deepface/modules/recognition.py b/deepface/modules/recognition.py
index 60038a5..4eda51f 100644
--- a/deepface/modules/recognition.py
+++ b/deepface/modules/recognition.py
@@ -30,6 +30,7 @@ def find(
normalization: str = "base",
silent: bool = False,
refresh_database: bool = True,
+ anti_spoofing: bool = False,
) -> List[pd.DataFrame]:
"""
Identify individuals in a database
@@ -69,8 +70,10 @@ def find(
silent (boolean): Suppress or allow some log messages for a quieter analysis process.
refresh_database (boolean): Synchronizes the images representation (pkl) file with the
- directory/db files, if set to false, it will ignore any file changes inside the db_path
- directory (default is True).
+ directory/db files. If set to false, it will ignore any file changes inside the db_path
+ directory (default is True).
+
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
Returns:
@@ -241,11 +244,14 @@ def find(
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
+ anti_spoofing=anti_spoofing,
)
resp_obj = []
for source_obj in source_objs:
+ if anti_spoofing is True and source_obj.get("is_real", True) is False:
+ raise ValueError("Spoof detected in the given image.")
source_img = source_obj["face"]
source_region = source_obj["facial_area"]
target_embedding_obj = representation.represent(
diff --git a/deepface/modules/representation.py b/deepface/modules/representation.py
index 8e6f7cc..b228288 100644
--- a/deepface/modules/representation.py
+++ b/deepface/modules/representation.py
@@ -18,6 +18,7 @@ def represent(
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
+ anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Represent facial images as multi-dimensional vector embeddings.
@@ -43,6 +44,8 @@ def represent(
normalization (string): Normalize the input image before feeding it to the model.
Default is base. Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
results (List[Dict[str, Any]]): A list of dictionaries, each containing the
following fields:
@@ -72,6 +75,7 @@ def represent(
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
+ anti_spoofing=anti_spoofing,
)
else: # skip
# Try load. If load error, will raise exception internal
@@ -91,6 +95,8 @@ def represent(
# ---------------------------------
for img_obj in img_objs:
+ if anti_spoofing is True and img_obj.get("is_real", True) is False:
+ raise ValueError("Spoof detected in the given image.")
img = img_obj["face"]
# rgb to bgr
diff --git a/deepface/modules/streaming.py b/deepface/modules/streaming.py
index 95d05d9..b6b579d 100644
--- a/deepface/modules/streaming.py
+++ b/deepface/modules/streaming.py
@@ -21,7 +21,7 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
IDENTIFIED_IMG_SIZE = 112
TEXT_COLOR = (255, 255, 255)
-
+# pylint: disable=unused-variable
def analysis(
db_path: str,
model_name="VGG-Face",
@@ -31,6 +31,7 @@ def analysis(
source=0,
time_threshold=5,
frame_threshold=5,
+ anti_spoofing: bool = False,
):
"""
Run real time face recognition and facial attribute analysis
@@ -57,6 +58,9 @@ def analysis(
time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
frame_threshold (int): The frame threshold for face recognition (default is 5).
+
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
None
"""
@@ -89,7 +93,9 @@ def analysis(
faces_coordinates = []
if freeze is False:
- faces_coordinates = grab_facial_areas(img=img, detector_backend=detector_backend)
+ faces_coordinates = grab_facial_areas(
+ img=img, detector_backend=detector_backend, anti_spoofing=anti_spoofing
+ )
# we will pass img to analyze modules (identity, demography) and add some illustrations
# that is why, we will not be able to extract detected face from img clearly
@@ -108,7 +114,9 @@ def analysis(
freeze = num_frames_with_faces > 0 and num_frames_with_faces % frame_threshold == 0
if freeze:
# add analyze results into img - derive from raw_img
- img = highlight_facial_areas(img=raw_img, faces_coordinates=faces_coordinates)
+ img = highlight_facial_areas(
+ img=raw_img, faces_coordinates=faces_coordinates, anti_spoofing=anti_spoofing
+ )
# age, gender and emotion analysis
img = perform_demography_analysis(
@@ -268,25 +276,37 @@ def build_demography_models(enable_face_analysis: bool) -> None:
def highlight_facial_areas(
- img: np.ndarray, faces_coordinates: List[Tuple[int, int, int, int]]
+ img: np.ndarray,
+ faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
+ anti_spoofing: bool = False,
) -> np.ndarray:
"""
Highlight detected faces with rectangles in the given image
Args:
img (np.ndarray): image itself
faces_coordinates (list): list of face coordinates as tuple with x, y, w and h
+ plus is_real and antispoof_score values
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
Returns:
img (np.ndarray): image with highlighted facial areas
"""
- for x, y, w, h in faces_coordinates:
+ for x, y, w, h, is_real, antispoof_score in faces_coordinates:
# highlight facial area with rectangle
- cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67), 1)
+
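+ # colors are BGR: gray box by default, green for a real face, red for a spoofed one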
+ if anti_spoofing is False:
+ color = (67, 67, 67)
+ else:
+ if is_real is True:
+ color = (0, 255, 0)
+ else:
+ color = (0, 0, 255)
+ cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
return img
def countdown_to_freeze(
img: np.ndarray,
- faces_coordinates: List[Tuple[int, int, int, int]],
+ faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
frame_threshold: int,
num_frames_with_faces: int,
) -> np.ndarray:
@@ -300,7 +320,7 @@ def countdown_to_freeze(
Returns:
img (np.ndarray): image with counter values
"""
- for x, y, w, h in faces_coordinates:
+ for x, y, w, h, is_real, antispoof_score in faces_coordinates:
cv2.putText(
img,
str(frame_threshold - (num_frames_with_faces % frame_threshold)),
@@ -344,8 +364,8 @@ def countdown_to_release(
def grab_facial_areas(
- img: np.ndarray, detector_backend: str, threshold: int = 130
-) -> List[Tuple[int, int, int, int]]:
+ img: np.ndarray, detector_backend: str, threshold: int = 130, anti_spoofing: bool = False
+) -> List[Tuple[int, int, int, int, bool, float]]:
"""
Find facial area coordinates in the given image
Args:
@@ -363,6 +383,7 @@ def grab_facial_areas(
detector_backend=detector_backend,
# you may consider to extract with larger expanding value
expand_percentage=0,
+ anti_spoofing=anti_spoofing,
)
faces = [
(
@@ -370,6 +391,8 @@ def grab_facial_areas(
face_obj["facial_area"]["y"],
face_obj["facial_area"]["w"],
face_obj["facial_area"]["h"],
+ face_obj.get("is_real", True),
+ face_obj.get("antispoof_score", 0),
)
for face_obj in face_objs
if face_obj["facial_area"]["w"] > threshold
@@ -380,19 +403,19 @@ def grab_facial_areas(
def extract_facial_areas(
- img: np.ndarray, faces_coordinates: List[Tuple[int, int, int, int]]
+ img: np.ndarray, faces_coordinates: List[Tuple[int, int, int, int, bool, float]]
) -> List[np.ndarray]:
"""
Extract facial areas as numpy array from given image
Args:
img (np.ndarray): image itself
faces_coordinates (list): list of facial area coordinates as tuple with
- x, y, w and h values
+ x, y, w and h values plus is_real and antispoof_score values
Returns:
detected_faces (list): list of detected facial area images
"""
detected_faces = []
- for x, y, w, h in faces_coordinates:
+ for x, y, w, h, is_real, antispoof_score in faces_coordinates:
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
detected_faces.append(detected_face)
return detected_faces
@@ -401,7 +424,7 @@ def extract_facial_areas(
def perform_facial_recognition(
img: np.ndarray,
detected_faces: List[np.ndarray],
- faces_coordinates: List[Tuple[int, int, int, int]],
+ faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
db_path: str,
detector_backend: str,
distance_metric: str,
@@ -413,7 +436,7 @@ def perform_facial_recognition(
img (np.ndarray): image itself
detected_faces (list): list of extracted detected face images as numpy
faces_coordinates (list): list of facial area coordinates as tuple with
- x, y, w and h values
+ x, y, w and h values plus is_real and antispoof_score values
db_path (string): Path to the folder containing image files. All detected faces
in the database will be considered in the decision-making process.
detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
@@ -426,7 +449,7 @@ def perform_facial_recognition(
Returns:
img (np.ndarray): image with identified face informations
"""
- for idx, (x, y, w, h) in enumerate(faces_coordinates):
+ for idx, (x, y, w, h, is_real, antispoof_score) in enumerate(faces_coordinates):
detected_face = detected_faces[idx]
target_label, target_img = search_identity(
detected_face=detected_face,
@@ -454,7 +477,7 @@ def perform_facial_recognition(
def perform_demography_analysis(
enable_face_analysis: bool,
img: np.ndarray,
- faces_coordinates: List[Tuple[int, int, int, int]],
+ faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
detected_faces: List[np.ndarray],
) -> np.ndarray:
"""
@@ -463,14 +486,14 @@ def perform_demography_analysis(
enable_face_analysis (bool): Flag to enable face analysis.
img (np.ndarray): image itself
faces_coordinates (list): list of face coordinates as tuple with
- x, y, w and h values
+ x, y, w and h values plus is_real and antispoof_score values
detected_faces (list): list of extracted detected face images as numpy
Returns:
img (np.ndarray): image with analyzed demography information
"""
if enable_face_analysis is False:
return img
- for idx, (x, y, w, h) in enumerate(faces_coordinates):
+ for idx, (x, y, w, h, is_real, antispoof_score) in enumerate(faces_coordinates):
detected_face = detected_faces[idx]
demographies = DeepFace.analyze(
img_path=detected_face,
diff --git a/deepface/modules/verification.py b/deepface/modules/verification.py
index 5727ef3..8b03ed4 100644
--- a/deepface/modules/verification.py
+++ b/deepface/modules/verification.py
@@ -25,6 +25,7 @@ def verify(
normalization: str = "base",
silent: bool = False,
threshold: Optional[float] = None,
+ anti_spoofing: bool = False,
) -> Dict[str, Any]:
"""
Verify if an image pair represents the same person or different persons.
@@ -70,6 +71,8 @@ def verify(
If left unset, default pre-tuned threshold values will be applied based on the specified
model name and distance metric (default is None).
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
Returns:
result (dict): A dictionary containing verification results.
@@ -132,6 +135,7 @@ def verify(
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
+ anti_spoofing=anti_spoofing,
)
except ValueError as err:
raise ValueError("Exception while processing img1_path") from err
@@ -168,6 +172,7 @@ def verify(
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
+ anti_spoofing=anti_spoofing,
)
except ValueError as err:
raise ValueError("Exception while processing img2_path") from err
@@ -220,6 +225,7 @@ def __extract_faces_and_embeddings(
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
+ anti_spoofing: bool = False,
) -> Tuple[List[List[float]], List[dict]]:
"""
Extract facial areas and find corresponding embeddings for given image
@@ -237,10 +243,13 @@ def __extract_faces_and_embeddings(
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
+ anti_spoofing=anti_spoofing,
)
# find embeddings for each face
for img_obj in img_objs:
+ if anti_spoofing is True and img_obj.get("is_real", True) is False:
+ raise ValueError("Spoof detected in the given image.")
img_embedding_obj = representation.represent(
img_path=img_obj["face"],
model_name=model_name,
diff --git a/deepface/spoofmodels/FasNet.py b/deepface/spoofmodels/FasNet.py
new file mode 100644
index 0000000..8d1f988
--- /dev/null
+++ b/deepface/spoofmodels/FasNet.py
@@ -0,0 +1,222 @@
+# Minivision's Silent-Face-Anti-Spoofing Repo licensed under Apache License 2.0
+# Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
+
+# built-in dependencies
+from typing import Union
+
+# 3rd party dependencies
+import cv2
+import numpy as np
+
+# project dependencies
+from deepface.commons import folder_utils, file_utils, logger as log
+
+logger = log.get_singletonish_logger()
+
+# pylint: disable=line-too-long, too-few-public-methods
+class Fasnet:
+ """
+ Mini Face Anti Spoofing Net Library from repo: github.com/minivision-ai/Silent-Face-Anti-Spoofing
+ """
+
+ def __init__(self):
+ # torch is an optional dependency; enforce that it is installed if this class is imported
+ try:
+ import torch
+ except Exception as err:
+ raise ValueError(
+ "You must install torch with `pip install torch` command to use face anti spoofing module"
+ ) from err
+
+ home = folder_utils.get_deepface_home()
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ self.device = device
+
+ # download pre-trained models if not installed yet
+ file_utils.download_external_file(
+ file_name="2.7_80x80_MiniFASNetV2.pth",
+ exact_file_path=f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth",
+ url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
+ )
+
+ file_utils.download_external_file(
+ file_name="4_0_0_80x80_MiniFASNetV1SE.pth",
+ exact_file_path=f"{home}/.deepface/weights/4_0_0_80x80_MiniFASNetV1SE.pth",
+ url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth",
+ )
+
+ # guarantees Fasnet imported and torch installed
+ from deepface.spoofmodels import FasNetBackbone
+
+ # Fasnet will use 2 distinct models to predict, then it will find the sum of predictions
+ # to make a final prediction
+
+ first_model = FasNetBackbone.MiniFASNetV2(conv6_kernel=(5, 5)).to(device)
+ second_model = FasNetBackbone.MiniFASNetV1SE(conv6_kernel=(5, 5)).to(device)
+
+ # load model weight for first model
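+ # checkpoints trained with DataParallel prefix their keys with "module.", so strip it before loading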
+ state_dict = torch.load(
+ f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth", map_location=device
+ )
+ keys = iter(state_dict)
+ first_layer_name = keys.__next__()
+
+ if first_layer_name.find("module.") >= 0:
+ from collections import OrderedDict
+
+ new_state_dict = OrderedDict()
+ for key, value in state_dict.items():
+ name_key = key[7:]
+ new_state_dict[name_key] = value
+ first_model.load_state_dict(new_state_dict)
+ else:
+ first_model.load_state_dict(state_dict)
+
+ # load model weight for second model
+ state_dict = torch.load(
+ f"{home}/.deepface/weights/4_0_0_80x80_MiniFASNetV1SE.pth", map_location=device
+ )
+ keys = iter(state_dict)
+ first_layer_name = keys.__next__()
+
+ if first_layer_name.find("module.") >= 0:
+ from collections import OrderedDict
+
+ new_state_dict = OrderedDict()
+ for key, value in state_dict.items():
+ name_key = key[7:]
+ new_state_dict[name_key] = value
+ second_model.load_state_dict(new_state_dict)
+ else:
+ second_model.load_state_dict(state_dict)
+
+ # evaluate models
+ _ = first_model.eval()
+ _ = second_model.eval()
+
+ self.first_model = first_model
+ self.second_model = second_model
+
+ def analyze(self, img: np.ndarray, facial_area: Union[list, tuple]):
+ """
+ Analyze whether a given image is spoofed or not
+ Args:
+ img (np.ndarray): pre-loaded image
+ facial_area (list or tuple): facial rectangle area coordinates with x, y, w, h respectively
+ Returns:
+ result (tuple): a result tuple consisting of is_real and score
+ """
+ import torch
+ import torch.nn.functional as F
+
+ x, y, w, h = facial_area
+ first_img = crop(img, (x, y, w, h), 2.7, 80, 80)
+ second_img = crop(img, (x, y, w, h), 4, 80, 80)
+
+ test_transform = Compose(
+ [
+ ToTensor(),
+ ]
+ )
+
+ first_img = test_transform(first_img)
+ first_img = first_img.unsqueeze(0).to(self.device)
+
+ second_img = test_transform(second_img)
+ second_img = second_img.unsqueeze(0).to(self.device)
+
+ with torch.no_grad():
+ first_result = self.first_model.forward(first_img)
+ first_result = F.softmax(first_result, dim=1).cpu().numpy()
+
+ second_result = self.second_model.forward(second_img)
+ second_result = F.softmax(second_result, dim=1).cpu().numpy()
+
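+ # ensemble by summing the two models' softmax outputs; label index 1 corresponds to a real face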
+ prediction = np.zeros((1, 3))
+ prediction += first_result
+ prediction += second_result
+
+ label = np.argmax(prediction)
+ is_real = True if label == 1 else False # pylint: disable=simplifiable-if-expression
+ score = prediction[0][label] / 2
+
+ return is_real, score
+
+
+# subsidiary classes and functions
+
+
+def to_tensor(pic):
+ """Convert a ``numpy.ndarray`` to tensor.
+
+ See ``ToTensor`` for more details.
+
+ Args:
+ pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
+
+ Returns:
+ Tensor: Converted image.
+ """
+ import torch
+
+ # handle numpy array
+ # IR image channel=1: modify by lzc --> 20190730
+ if pic.ndim == 2:
+ pic = pic.reshape((pic.shape[0], pic.shape[1], 1))
+
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ # backward compatibility
+ # return img.float().div(255) modify by zkx
+ return img.float()
+
+
+class Compose:
+ def __init__(self, transforms):
+ self.transforms = transforms
+
+ def __call__(self, img):
+ for t in self.transforms:
+ img = t(img)
+ return img
+
+
+class ToTensor:
+ def __call__(self, pic):
+ return to_tensor(pic)
+
+
+def _get_new_box(src_w, src_h, bbox, scale):
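+ # enlarge the bbox around its center by the given scale, clamping the result to the image borders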
+ x = bbox[0]
+ y = bbox[1]
+ box_w = bbox[2]
+ box_h = bbox[3]
+ # pylint: disable=nested-min-max
+ scale = min((src_h - 1) / box_h, min((src_w - 1) / box_w, scale))
+ new_width = box_w * scale
+ new_height = box_h * scale
+ center_x, center_y = box_w / 2 + x, box_h / 2 + y
+ left_top_x = center_x - new_width / 2
+ left_top_y = center_y - new_height / 2
+ right_bottom_x = center_x + new_width / 2
+ right_bottom_y = center_y + new_height / 2
+ if left_top_x < 0:
+ right_bottom_x -= left_top_x
+ left_top_x = 0
+ if left_top_y < 0:
+ right_bottom_y -= left_top_y
+ left_top_y = 0
+ if right_bottom_x > src_w - 1:
+ left_top_x -= right_bottom_x - src_w + 1
+ right_bottom_x = src_w - 1
+ if right_bottom_y > src_h - 1:
+ left_top_y -= right_bottom_y - src_h + 1
+ right_bottom_y = src_h - 1
+ return int(left_top_x), int(left_top_y), int(right_bottom_x), int(right_bottom_y)
+
+
+def crop(org_img, bbox, scale, out_w, out_h):
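+ # crop the enlarged facial area from the original image and resize it to (out_w, out_h), e.g. 80x80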
+ src_h, src_w, _ = np.shape(org_img)
+ left_top_x, left_top_y, right_bottom_x, right_bottom_y = _get_new_box(src_w, src_h, bbox, scale)
+ img = org_img[left_top_y : right_bottom_y + 1, left_top_x : right_bottom_x + 1]
+ dst_img = cv2.resize(img, (out_w, out_h))
+ return dst_img
diff --git a/deepface/spoofmodels/FasNetBackbone.py b/deepface/spoofmodels/FasNetBackbone.py
new file mode 100644
index 0000000..abfb6ce
--- /dev/null
+++ b/deepface/spoofmodels/FasNetBackbone.py
@@ -0,0 +1,524 @@
+# These classes are copied from Minivision's Silent-Face-Anti-Spoofing Repo
+# licensed under Apache License 2.0
+# Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
+
+# 3rd party dependencies
+import torch
+from torch.nn import (
+ Linear,
+ Conv2d,
+ BatchNorm1d,
+ BatchNorm2d,
+ PReLU,
+ ReLU,
+ Sigmoid,
+ AdaptiveAvgPool2d,
+ Sequential,
+ Module,
+)
+
+# pylint: disable=super-with-arguments, too-many-instance-attributes, unused-argument, redefined-builtin, too-few-public-methods
+
+keep_dict = {
+ "1.8M": [
+ 32,
+ 32,
+ 103,
+ 103,
+ 64,
+ 13,
+ 13,
+ 64,
+ 26,
+ 26,
+ 64,
+ 13,
+ 13,
+ 64,
+ 52,
+ 52,
+ 64,
+ 231,
+ 231,
+ 128,
+ 154,
+ 154,
+ 128,
+ 52,
+ 52,
+ 128,
+ 26,
+ 26,
+ 128,
+ 52,
+ 52,
+ 128,
+ 26,
+ 26,
+ 128,
+ 26,
+ 26,
+ 128,
+ 308,
+ 308,
+ 128,
+ 26,
+ 26,
+ 128,
+ 26,
+ 26,
+ 128,
+ 512,
+ 512,
+ ],
+ "1.8M_": [
+ 32,
+ 32,
+ 103,
+ 103,
+ 64,
+ 13,
+ 13,
+ 64,
+ 13,
+ 13,
+ 64,
+ 13,
+ 13,
+ 64,
+ 13,
+ 13,
+ 64,
+ 231,
+ 231,
+ 128,
+ 231,
+ 231,
+ 128,
+ 52,
+ 52,
+ 128,
+ 26,
+ 26,
+ 128,
+ 77,
+ 77,
+ 128,
+ 26,
+ 26,
+ 128,
+ 26,
+ 26,
+ 128,
+ 308,
+ 308,
+ 128,
+ 26,
+ 26,
+ 128,
+ 26,
+ 26,
+ 128,
+ 512,
+ 512,
+ ],
+}
+
+
+def MiniFASNetV2(embedding_size=128, conv6_kernel=(7, 7), drop_p=0.2, num_classes=3, img_channel=3):
+ return MiniFASNet(
+ keep_dict["1.8M_"], embedding_size, conv6_kernel, drop_p, num_classes, img_channel
+ )
+
+
+def MiniFASNetV1SE(
+ embedding_size=128, conv6_kernel=(7, 7), drop_p=0.75, num_classes=3, img_channel=3
+):
+ return MiniFASNetSE(
+ keep_dict["1.8M"], embedding_size, conv6_kernel, drop_p, num_classes, img_channel
+ )
+
+
+class Flatten(Module):
+ def forward(self, input):
+ return input.view(input.size(0), -1)
+
+
+class Conv_block(Module):
+ def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
+ super(Conv_block, self).__init__()
+ self.conv = Conv2d(
+ in_c,
+ out_c,
+ kernel_size=kernel,
+ groups=groups,
+ stride=stride,
+ padding=padding,
+ bias=False,
+ )
+ self.bn = BatchNorm2d(out_c)
+ self.prelu = PReLU(out_c)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.prelu(x)
+ return x
+
+
+class Linear_block(Module):
+ def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
+ super(Linear_block, self).__init__()
+ self.conv = Conv2d(
+ in_c,
+ out_channels=out_c,
+ kernel_size=kernel,
+ groups=groups,
+ stride=stride,
+ padding=padding,
+ bias=False,
+ )
+ self.bn = BatchNorm2d(out_c)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ return x
+
+
+class Depth_Wise(Module):
+ def __init__(
+ self, c1, c2, c3, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1
+ ):
+ super(Depth_Wise, self).__init__()
+ c1_in, c1_out = c1
+ c2_in, c2_out = c2
+ c3_in, c3_out = c3
+ self.conv = Conv_block(c1_in, out_c=c1_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
+ self.conv_dw = Conv_block(
+ c2_in, c2_out, groups=c2_in, kernel=kernel, padding=padding, stride=stride
+ )
+ self.project = Linear_block(c3_in, c3_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
+ self.residual = residual
+
+ def forward(self, x):
+ if self.residual:
+ short_cut = x
+ x = self.conv(x)
+ x = self.conv_dw(x)
+ x = self.project(x)
+ if self.residual:
+ output = short_cut + x
+ else:
+ output = x
+ return output
+
+
+class Depth_Wise_SE(Module):
+ def __init__(
+ self,
+ c1,
+ c2,
+ c3,
+ residual=False,
+ kernel=(3, 3),
+ stride=(2, 2),
+ padding=(1, 1),
+ groups=1,
+ se_reduct=8,
+ ):
+ super(Depth_Wise_SE, self).__init__()
+ c1_in, c1_out = c1
+ c2_in, c2_out = c2
+ c3_in, c3_out = c3
+ self.conv = Conv_block(c1_in, out_c=c1_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
+ self.conv_dw = Conv_block(
+ c2_in, c2_out, groups=c2_in, kernel=kernel, padding=padding, stride=stride
+ )
+ self.project = Linear_block(c3_in, c3_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
+ self.residual = residual
+ self.se_module = SEModule(c3_out, se_reduct)
+
+ def forward(self, x):
+ if self.residual:
+ short_cut = x
+ x = self.conv(x)
+ x = self.conv_dw(x)
+ x = self.project(x)
+ if self.residual:
+ x = self.se_module(x)
+ output = short_cut + x
+ else:
+ output = x
+ return output
+
+
+class SEModule(Module):
+ def __init__(self, channels, reduction):
+ super(SEModule, self).__init__()
+ self.avg_pool = AdaptiveAvgPool2d(1)
+ self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
+ self.bn1 = BatchNorm2d(channels // reduction)
+ self.relu = ReLU(inplace=True)
+ self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
+ self.bn2 = BatchNorm2d(channels)
+ self.sigmoid = Sigmoid()
+
+ def forward(self, x):
+ module_input = x
+ x = self.avg_pool(x)
+ x = self.fc1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.fc2(x)
+ x = self.bn2(x)
+ x = self.sigmoid(x)
+ return module_input * x
+
+
+class Residual(Module):
+ def __init__(self, c1, c2, c3, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
+ super(Residual, self).__init__()
+ modules = []
+ for i in range(num_block):
+ c1_tuple = c1[i]
+ c2_tuple = c2[i]
+ c3_tuple = c3[i]
+ modules.append(
+ Depth_Wise(
+ c1_tuple,
+ c2_tuple,
+ c3_tuple,
+ residual=True,
+ kernel=kernel,
+ padding=padding,
+ stride=stride,
+ groups=groups,
+ )
+ )
+ self.model = Sequential(*modules)
+
+ def forward(self, x):
+ return self.model(x)
+
+
+class ResidualSE(Module):
+ def __init__(
+ self,
+ c1,
+ c2,
+ c3,
+ num_block,
+ groups,
+ kernel=(3, 3),
+ stride=(1, 1),
+ padding=(1, 1),
+ se_reduct=4,
+ ):
+ super(ResidualSE, self).__init__()
+ modules = []
+ for i in range(num_block):
+ c1_tuple = c1[i]
+ c2_tuple = c2[i]
+ c3_tuple = c3[i]
+ if i == num_block - 1:
+ modules.append(
+ Depth_Wise_SE(
+ c1_tuple,
+ c2_tuple,
+ c3_tuple,
+ residual=True,
+ kernel=kernel,
+ padding=padding,
+ stride=stride,
+ groups=groups,
+ se_reduct=se_reduct,
+ )
+ )
+ else:
+ modules.append(
+ Depth_Wise(
+ c1_tuple,
+ c2_tuple,
+ c3_tuple,
+ residual=True,
+ kernel=kernel,
+ padding=padding,
+ stride=stride,
+ groups=groups,
+ )
+ )
+ self.model = Sequential(*modules)
+
+ def forward(self, x):
+ return self.model(x)
+
+
+class MiniFASNet(Module):
+ def __init__(
+ self, keep, embedding_size, conv6_kernel=(7, 7), drop_p=0.0, num_classes=3, img_channel=3
+ ):
+ super(MiniFASNet, self).__init__()
+ self.embedding_size = embedding_size
+
+ self.conv1 = Conv_block(img_channel, keep[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1))
+ self.conv2_dw = Conv_block(
+ keep[0], keep[1], kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=keep[1]
+ )
+
+ c1 = [(keep[1], keep[2])]
+ c2 = [(keep[2], keep[3])]
+ c3 = [(keep[3], keep[4])]
+
+ self.conv_23 = Depth_Wise(
+ c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[3]
+ )
+
+ c1 = [(keep[4], keep[5]), (keep[7], keep[8]), (keep[10], keep[11]), (keep[13], keep[14])]
+ c2 = [(keep[5], keep[6]), (keep[8], keep[9]), (keep[11], keep[12]), (keep[14], keep[15])]
+ c3 = [(keep[6], keep[7]), (keep[9], keep[10]), (keep[12], keep[13]), (keep[15], keep[16])]
+
+ self.conv_3 = Residual(
+ c1, c2, c3, num_block=4, groups=keep[4], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
+
+ c1 = [(keep[16], keep[17])]
+ c2 = [(keep[17], keep[18])]
+ c3 = [(keep[18], keep[19])]
+
+ self.conv_34 = Depth_Wise(
+ c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[19]
+ )
+
+ c1 = [
+ (keep[19], keep[20]),
+ (keep[22], keep[23]),
+ (keep[25], keep[26]),
+ (keep[28], keep[29]),
+ (keep[31], keep[32]),
+ (keep[34], keep[35]),
+ ]
+ c2 = [
+ (keep[20], keep[21]),
+ (keep[23], keep[24]),
+ (keep[26], keep[27]),
+ (keep[29], keep[30]),
+ (keep[32], keep[33]),
+ (keep[35], keep[36]),
+ ]
+ c3 = [
+ (keep[21], keep[22]),
+ (keep[24], keep[25]),
+ (keep[27], keep[28]),
+ (keep[30], keep[31]),
+ (keep[33], keep[34]),
+ (keep[36], keep[37]),
+ ]
+
+ self.conv_4 = Residual(
+ c1, c2, c3, num_block=6, groups=keep[19], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
+
+ c1 = [(keep[37], keep[38])]
+ c2 = [(keep[38], keep[39])]
+ c3 = [(keep[39], keep[40])]
+
+ self.conv_45 = Depth_Wise(
+ c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[40]
+ )
+
+ c1 = [(keep[40], keep[41]), (keep[43], keep[44])]
+ c2 = [(keep[41], keep[42]), (keep[44], keep[45])]
+ c3 = [(keep[42], keep[43]), (keep[45], keep[46])]
+
+ self.conv_5 = Residual(
+ c1, c2, c3, num_block=2, groups=keep[40], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
+ self.conv_6_sep = Conv_block(
+ keep[46], keep[47], kernel=(1, 1), stride=(1, 1), padding=(0, 0)
+ )
+ self.conv_6_dw = Linear_block(
+ keep[47], keep[48], groups=keep[48], kernel=conv6_kernel, stride=(1, 1), padding=(0, 0)
+ )
+ self.conv_6_flatten = Flatten()
+ self.linear = Linear(512, embedding_size, bias=False)
+ self.bn = BatchNorm1d(embedding_size)
+ self.drop = torch.nn.Dropout(p=drop_p)
+ self.prob = Linear(embedding_size, num_classes, bias=False)
+
+ def forward(self, x):
+ out = self.conv1(x)
+ out = self.conv2_dw(out)
+ out = self.conv_23(out)
+ out = self.conv_3(out)
+ out = self.conv_34(out)
+ out = self.conv_4(out)
+ out = self.conv_45(out)
+ out = self.conv_5(out)
+ out = self.conv_6_sep(out)
+ out = self.conv_6_dw(out)
+ out = self.conv_6_flatten(out)
+ if self.embedding_size != 512:
+ out = self.linear(out)
+ out = self.bn(out)
+ out = self.drop(out)
+ out = self.prob(out)
+ return out
+
+
+class MiniFASNetSE(MiniFASNet):
+ def __init__(
+ self, keep, embedding_size, conv6_kernel=(7, 7), drop_p=0.75, num_classes=4, img_channel=3
+ ):
+ super(MiniFASNetSE, self).__init__(
+ keep=keep,
+ embedding_size=embedding_size,
+ conv6_kernel=conv6_kernel,
+ drop_p=drop_p,
+ num_classes=num_classes,
+ img_channel=img_channel,
+ )
+
+ c1 = [(keep[4], keep[5]), (keep[7], keep[8]), (keep[10], keep[11]), (keep[13], keep[14])]
+ c2 = [(keep[5], keep[6]), (keep[8], keep[9]), (keep[11], keep[12]), (keep[14], keep[15])]
+ c3 = [(keep[6], keep[7]), (keep[9], keep[10]), (keep[12], keep[13]), (keep[15], keep[16])]
+
+ self.conv_3 = ResidualSE(
+ c1, c2, c3, num_block=4, groups=keep[4], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
+
+ c1 = [
+ (keep[19], keep[20]),
+ (keep[22], keep[23]),
+ (keep[25], keep[26]),
+ (keep[28], keep[29]),
+ (keep[31], keep[32]),
+ (keep[34], keep[35]),
+ ]
+ c2 = [
+ (keep[20], keep[21]),
+ (keep[23], keep[24]),
+ (keep[26], keep[27]),
+ (keep[29], keep[30]),
+ (keep[32], keep[33]),
+ (keep[35], keep[36]),
+ ]
+ c3 = [
+ (keep[21], keep[22]),
+ (keep[24], keep[25]),
+ (keep[27], keep[28]),
+ (keep[30], keep[31]),
+ (keep[33], keep[34]),
+ (keep[36], keep[37]),
+ ]
+
+ self.conv_4 = ResidualSE(
+ c1, c2, c3, num_block=6, groups=keep[19], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
+
+ c1 = [(keep[40], keep[41]), (keep[43], keep[44])]
+ c2 = [(keep[41], keep[42]), (keep[44], keep[45])]
+ c3 = [(keep[42], keep[43]), (keep[45], keep[46])]
+ self.conv_5 = ResidualSE(
+ c1, c2, c3, num_block=2, groups=keep[40], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
+ )
diff --git a/deepface/spoofmodels/__init__.py b/deepface/spoofmodels/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/icon/face-anti-spoofing.jpg b/icon/face-anti-spoofing.jpg
new file mode 100644
index 0000000..bade126
Binary files /dev/null and b/icon/face-anti-spoofing.jpg differ
diff --git a/requirements_additional.txt b/requirements_additional.txt
index 0344661..ea76fde 100644
--- a/requirements_additional.txt
+++ b/requirements_additional.txt
@@ -2,4 +2,5 @@ opencv-contrib-python>=4.3.0.36
mediapipe>=0.8.7.3
dlib>=19.20.0
ultralytics>=8.0.122
-facenet-pytorch>=2.5.3
\ No newline at end of file
+facenet-pytorch>=2.5.3
+torch>=2.1.2
\ No newline at end of file
diff --git a/tests/stream.py b/tests/stream.py
index 6c041bf..d0cd3c9 100644
--- a/tests/stream.py
+++ b/tests/stream.py
@@ -1,8 +1,8 @@
from deepface import DeepFace
-DeepFace.stream("dataset") #opencv
-#DeepFace.stream("dataset", detector_backend = 'opencv')
-#DeepFace.stream("dataset", detector_backend = 'ssd')
-#DeepFace.stream("dataset", detector_backend = 'mtcnn')
-#DeepFace.stream("dataset", detector_backend = 'dlib')
-#DeepFace.stream("dataset", detector_backend = 'retinaface')
+DeepFace.stream("dataset", enable_face_analysis=False, anti_spoofing=True) # opencv
+# DeepFace.stream("dataset", detector_backend = 'opencv')
+# DeepFace.stream("dataset", detector_backend = 'ssd')
+# DeepFace.stream("dataset", detector_backend = 'mtcnn')
+# DeepFace.stream("dataset", detector_backend = 'dlib')
+# DeepFace.stream("dataset", detector_backend = 'retinaface')