Mirror of https://github.com/serengil/deepface.git (synced 2025-06-07 03:55:21 +00:00)

Commit: 9d8d2ddf4f ("weight downloads moved to a common place")
Parent: 0d1bce3618

@@ -1,31 +0,0 @@
-# built-in dependencies
-import os
-
-# 3rd party dependencies
-import gdown
-
-# project dependencies
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def download_external_file(file_name: str, exact_file_path: str, url: str) -> None:
-    """
-    Download an external file
-    Args:
-        file_name (str): file name with extension
-        exact_file_path (str): exact location of the file with file name
-        url (str): url to be downloaded
-    Returns:
-        None
-    """
-    if not os.path.exists(exact_file_path):
-        logger.info(f"Downloading MiniFASNetV2 weights to {exact_file_path}")
-        try:
-            gdown.download(url, exact_file_path, quiet=False)
-        except Exception as err:
-            raise ValueError(
-                f"Exception while downloading {file_name} from {url} to {exact_file_path}."
-                "You may consider to download it and copy to the target destination."
-            ) from err

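For context, a minimal sketch of how the removed helper was called before this commit; the call is reconstructed from the Fasnet call site that this diff updates further down, and home comes from folder_utils.get_deepface_home().

# old call pattern (reconstructed from the Fasnet caller in this diff)
from deepface.commons import folder_utils, file_utils

home = folder_utils.get_deepface_home()
file_utils.download_external_file(
    file_name="2.7_80x80_MiniFASNetV2.pth",
    exact_file_path=f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth",
    url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
)
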
deepface/commons/weight_utils.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+# built-in dependencies
+import os
+from typing import Optional
+import zipfile
+import bz2
+
+# 3rd party dependencies
+import gdown
+
+# project dependencies
+from deepface.commons import folder_utils
+from deepface.commons.logger import Logger
+
+logger = Logger()
+
+
+def download_weights_if_necessary(
+    file_name: str, source_url: str, compress_type: Optional[str] = None
+) -> str:
+    """
+    Download the weights of a pre-trained model from external source if not downloaded yet.
+    Args:
+        file_name (str): target file name with extension
+        source_url (str): source url to be downloaded
+        compress_type (optional str): compress type e.g. zip or bz2
+    Returns:
+        target_file (str): exact path for the target file
+    """
+    home = folder_utils.get_deepface_home()
+
+    target_file = os.path.join(home, ".deepface/weights", file_name)
+
+    if os.path.isfile(target_file):
+        logger.debug(f"{file_name} is already available at {target_file}")
+        return target_file
+
+    try:
+        logger.info(f"🔗 {file_name} will be downloaded from {source_url} to {target_file}...")
+
+        if compress_type is None:
+            gdown.download(source_url, target_file, quiet=False)
+        elif compress_type is not None:
+            gdown.download(source_url, f"{target_file}.{compress_type}", quiet=False)
+
+    except Exception as err:
+        exception_msg = (
+            f"⛓️💥 Exception while downloading {file_name} from {source_url}. "
+            f"You may consider to download it manually to {target_file}."
+        )
+        logger.error(exception_msg)
+        raise ValueError(exception_msg) from err
+
+    # uncompress downloaded file
+    if compress_type == "zip":
+        with zipfile.ZipFile(f"{target_file}.zip", "r") as zip_ref:
+            zip_ref.extractall(os.path.join(home, ".deepface/weights"))
+            logger.info(f"{target_file}.zip unzipped")
+    elif compress_type == "bz2":
+        bz2file = bz2.BZ2File(f"{target_file}.bz2")
+        data = bz2file.read()
+        with open(target_file, "wb") as f:
+            f.write(data)
+        logger.info(f"{target_file}.bz2 unzipped")
+
+    return target_file

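For reference, a minimal usage sketch of the new helper, assuming folder_utils.get_deepface_home() resolves as usual so that files land under <home>/.deepface/weights; both calls mirror call sites updated later in this diff.

from deepface.commons import weight_utils

# plain file: fetched once with gdown, then served from the local cache on later calls
weights_path = weight_utils.download_weights_if_necessary(
    file_name="centerface.onnx", source_url=WEIGHTS_URL  # WEIGHTS_URL defined by the caller
)

# compressed source: downloaded as <file_name>.bz2 (or .zip) and extracted in place
landmark_path = weight_utils.download_weights_if_necessary(
    file_name="shape_predictor_5_face_landmarks.dat",
    source_url="http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2",
    compress_type="bz2",
)
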
@@ -1,8 +1,9 @@
-import os
-import gdown
+# 3rd party dependencies
 import numpy as np
 
+# project dependencies
 from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, folder_utils
+from deepface.commons import package_utils, weight_utils
 from deepface.models.Demography import Demography
 from deepface.commons.logger import Logger
@@ -65,21 +66,13 @@ def load_model(
     # --------------------------
 
     # load weights
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/age_model_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    age_model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="age_model_weights.h5", source_url=url
+    )
+
+    age_model.load_weights(weight_file)
 
     return age_model
 
-    # --------------------------
-
 
 def find_apparent_age(age_predictions: np.ndarray) -> np.float64:
     """
     Find apparent age prediction from a given probas of ages

@@ -1,13 +1,9 @@
-# built-in dependencies
-import os
-
 # 3rd party dependencies
-import gdown
 import numpy as np
 import cv2
 
 # project dependencies
-from deepface.commons import package_utils, folder_utils
+from deepface.commons import package_utils, weight_utils
 from deepface.models.Demography import Demography
 from deepface.commons.logger import Logger
@@ -96,13 +92,10 @@ def load_model(
 
     # ----------------------------
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/facial_expression_model_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="facial_expression_model_weights.h5", source_url=url
+    )
+
+    model.load_weights(weight_file)
 
     return model

@@ -1,13 +1,9 @@
-# built-in dependencies
-import os
-
 # 3rd party dependencies
-import gdown
 import numpy as np
 
 # project dependencies
 from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, folder_utils
+from deepface.commons import package_utils, weight_utils
 from deepface.models.Demography import Demography
 from deepface.commons.logger import Logger
@@ -72,14 +68,10 @@ def load_model(
     # --------------------------
 
     # load weights
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/gender_model_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    gender_model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="gender_model_weights.h5", source_url=url
+    )
+
+    gender_model.load_weights(weight_file)
 
     return gender_model

@@ -1,13 +1,9 @@
-# built-in dependencies
-import os
-
 # 3rd party dependencies
-import gdown
 import numpy as np
 
 # project dependencies
 from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, folder_utils
+from deepface.commons import package_utils, weight_utils
 from deepface.models.Demography import Demography
 from deepface.commons.logger import Logger
@@ -69,14 +65,10 @@ def load_model(
     # --------------------------
 
     # load weights
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/race_model_single_batch.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    race_model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="race_model_single_batch.h5", source_url=url
+    )
+
+    race_model.load_weights(weight_file)
 
     return race_model

@@ -5,10 +5,9 @@ from typing import List
 # 3rd party dependencies
 import numpy as np
 import cv2
-import gdown
 
 # project dependencies
-from deepface.commons import folder_utils
+from deepface.commons import weight_utils
 from deepface.models.Detector import Detector, FacialAreaRegion
 from deepface.commons.logger import Logger
@@ -29,19 +28,9 @@ class CenterFaceClient(Detector):
         """
         Download pre-trained weights of CenterFace model if necessary and load built model
        """
-        home = folder_utils.get_deepface_home()
-        weights_path = os.path.join(home, ".deepface/weights/centerface.onnx")
-        if not os.path.isfile(weights_path):
-            logger.info(f"Downloading CenterFace weights from {WEIGHTS_URL} to {weights_path}...")
-            try:
-                gdown.download(WEIGHTS_URL, weights_path, quiet=False)
-            except Exception as err:
-                raise ValueError(
-                    f"Exception while downloading CenterFace weights from {WEIGHTS_URL}."
-                    f"You may consider to download it to {weights_path} manually."
-                ) from err
-            logger.info(f"CenterFace model is just downloaded to {os.path.basename(weights_path)}")
-
+        weights_path = weight_utils.download_weights_if_necessary(
+            file_name="centerface.onnx", source_url=WEIGHTS_URL
+        )
 
         return CenterFace(weight_path=weights_path)

@@ -1,9 +1,11 @@
+# built-in dependencies
 from typing import List
-import os
-import bz2
-import gdown
+
+# 3rd party dependencies
 import numpy as np
-from deepface.commons import folder_utils
+
+# project dependencies
+from deepface.commons import weight_utils
 from deepface.models.Detector import Detector, FacialAreaRegion
 from deepface.commons.logger import Logger
@@ -30,25 +32,14 @@ class DlibClient(Detector):
             ) from e
 
         # check required file exists in the home/.deepface/weights folder
-        home = folder_utils.get_deepface_home()
-        filename = "shape_predictor_5_face_landmarks.dat"
-        filepath = os.path.join(home, ".deepface/weights/", filename)
-
-        if not os.path.isfile(filepath):
-            logger.info(f"{filename + '.bz2'} is going to be downloaded")
-
-            url = f"http://dlib.net/files/{filename + '.bz2'}"
-            output = filepath + ".bz2"
-
-            gdown.download(url, output, quiet=False)
-
-            zipfile = bz2.BZ2File(output)
-            data = zipfile.read()
-            with open(filepath, "wb") as f:
-                f.write(data)
+        weight_file = weight_utils.download_weights_if_necessary(
+            file_name="shape_predictor_5_face_landmarks.dat",
+            source_url="http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2",
+            compress_type="bz2",
+        )
 
         face_detector = dlib.get_frontal_face_detector()
-        sp = dlib.shape_predictor(filepath)
+        sp = dlib.shape_predictor(weight_file)
 
         detector = {}
         detector["face_detector"] = face_detector

@@ -1,13 +1,19 @@
+# built-in dependencies
 from typing import Any, Union, List
 
+# 3rd party dependencies
 import cv2
 import numpy as np
-from deepface.models.Detector import Detector, FacialAreaRegion
 
-# Link -> https://github.com/timesler/facenet-pytorch
-# Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch
+# project dependencies
+from deepface.models.Detector import Detector, FacialAreaRegion
 
 
 class FastMtCnnClient(Detector):
+    """
+    Fast MtCnn Detector from github.com/timesler/facenet-pytorch
+    """
+
     def __init__(self):
         self.model = self.build_model()
@@ -69,7 +75,7 @@ class FastMtCnnClient(Detector):
                 "Please install using 'pip install facenet-pytorch'"
             ) from e
 
-        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         face_detector = fast_mtcnn(device=device)
 
         return face_detector

@ -1,11 +1,18 @@
|
|||||||
|
# built-in dependencies
|
||||||
from typing import Any, List
|
from typing import Any, List
|
||||||
import numpy as np
|
|
||||||
from deepface.models.Detector import Detector, FacialAreaRegion
|
|
||||||
|
|
||||||
# Link - https://google.github.io/mediapipe/solutions/face_detection
|
# 3rd party dependencies
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
# project dependencies
|
||||||
|
from deepface.models.Detector import Detector, FacialAreaRegion
|
||||||
|
|
||||||
|
|
||||||
class MediaPipeClient(Detector):
|
class MediaPipeClient(Detector):
|
||||||
|
"""
|
||||||
|
MediaPipe from google.github.io/mediapipe/solutions/face_detection
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.model = self.build_model()
|
self.model = self.build_model()
|
||||||
|
|
||||||
@ -69,7 +76,13 @@ class MediaPipeClient(Detector):
|
|||||||
# left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
|
# left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
|
||||||
|
|
||||||
facial_area = FacialAreaRegion(
|
facial_area = FacialAreaRegion(
|
||||||
x=x, y=y, w=w, h=h, left_eye=left_eye, right_eye=right_eye, confidence=confidence
|
x=x,
|
||||||
|
y=y,
|
||||||
|
w=w,
|
||||||
|
h=h,
|
||||||
|
left_eye=left_eye,
|
||||||
|
right_eye=right_eye,
|
||||||
|
confidence=float(confidence),
|
||||||
)
|
)
|
||||||
resp.append(facial_area)
|
resp.append(facial_area)
|
||||||
|
|
||||||
|
@@ -1,6 +1,11 @@
+# built-in dependencies
 from typing import List
 
+# 3rd party dependencies
 import numpy as np
 from mtcnn import MTCNN
 
+# project dependencies
 from deepface.models.Detector import Detector, FacialAreaRegion
 
 # pylint: disable=too-few-public-methods

@@ -1,7 +1,12 @@
+# built-in dependencies
 import os
 from typing import Any, List
 
+# 3rd party dependencies
 import cv2
 import numpy as np
 
+#project dependencies
 from deepface.models.Detector import Detector, FacialAreaRegion

@@ -1,6 +1,11 @@
+# built-in dependencies
 from typing import List
 
+# 3rd party dependencies
 import numpy as np
 from retinaface import RetinaFace as rf
 
+# project dependencies
 from deepface.models.Detector import Detector, FacialAreaRegion
 
 # pylint: disable=too-few-public-methods

@@ -1,11 +1,14 @@
+# built-in dependencies
 from typing import List
-import os
 from enum import IntEnum
-import gdown
+
+# 3rd party dependencies
 import cv2
 import numpy as np
 
+# project dependencies
 from deepface.models.face_detection import OpenCv
-from deepface.commons import folder_utils
+from deepface.commons import weight_utils
 from deepface.models.Detector import Detector, FacialAreaRegion
 from deepface.commons.logger import Logger
@@ -25,21 +28,17 @@ class SsdClient(Detector):
             model (dict)
         """
 
-        home = folder_utils.get_deepface_home()
-
         # model structure
-        output_model = os.path.join(home, ".deepface/weights/deploy.prototxt")
-        if not os.path.isfile(output_model):
-            logger.info(f"{os.path.basename(output_model)} will be downloaded...")
-            url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
-            gdown.download(url, output_model, quiet=False)
+        output_model = weight_utils.download_weights_if_necessary(
+            file_name="deploy.prototxt",
+            source_url="https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt",
+        )
 
         # pre-trained weights
-        output_weights = os.path.join(home, ".deepface/weights/res10_300x300_ssd_iter_140000.caffemodel")
-        if not os.path.isfile(output_weights):
-            logger.info(f"{os.path.basename(output_weights)} will be downloaded...")
-            url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
-            gdown.download(url, output_weights, quiet=False)
+        output_weights = weight_utils.download_weights_if_necessary(
+            file_name="res10_300x300_ssd_iter_140000.caffemodel",
+            source_url="https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel",
+        )
 
         try:
             face_detector = cv2.dnn.readNetFromCaffe(output_model, output_weights)
@@ -50,10 +49,7 @@ class SsdClient(Detector):
                 + "You can install it as pip install opencv-contrib-python."
             ) from err
 
-        return {
-            "face_detector": face_detector,
-            "opencv_module": OpenCv.OpenCvClient()
-        }
+        return {"face_detector": face_detector, "opencv_module": OpenCv.OpenCvClient()}
 
     def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
         """
@@ -97,11 +93,17 @@ class SsdClient(Detector):
         bottom = 6
 
         faces = detections[0][0]
-        faces = faces[(faces[:, ssd_labels.is_face] == 1) & (faces[:, ssd_labels.confidence] >= 0.90)]
+        faces = faces[
+            (faces[:, ssd_labels.is_face] == 1) & (faces[:, ssd_labels.confidence] >= 0.90)
+        ]
         margins = [ssd_labels.left, ssd_labels.top, ssd_labels.right, ssd_labels.bottom]
         faces[:, margins] = np.int32(faces[:, margins] * 300)
-        faces[:, margins] = np.int32(faces[:, margins] * [aspect_ratio_x, aspect_ratio_y, aspect_ratio_x, aspect_ratio_y])
-        faces[:, [ssd_labels.right, ssd_labels.bottom]] -= faces[:, [ssd_labels.left, ssd_labels.top]]
+        faces[:, margins] = np.int32(
+            faces[:, margins] * [aspect_ratio_x, aspect_ratio_y, aspect_ratio_x, aspect_ratio_y]
+        )
+        faces[:, [ssd_labels.right, ssd_labels.bottom]] -= faces[
+            :, [ssd_labels.left, ssd_labels.top]
+        ]
 
         resp = []
         for face in faces:

@@ -1,9 +1,12 @@
-import os
+# built-in dependencies
 from typing import Any, List
 
+# 3rd party dependencies
 import numpy as np
-import gdown
 
+# project dependencies
 from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons import folder_utils
+from deepface.commons import weight_utils
 from deepface.commons.logger import Logger
 
 logger = Logger()
@@ -26,7 +29,7 @@ class YoloClient(Detector):
             model (Any)
         """
 
-        # Import the Ultralytics YOLO model
+        # Import the optional Ultralytics YOLO model
        try:
             from ultralytics import YOLO
         except ModuleNotFoundError as e:
@@ -35,23 +38,12 @@ class YoloClient(Detector):
                 "Please install using 'pip install ultralytics'"
             ) from e
 
-        home = folder_utils.get_deepface_home()
-        weight_path = os.path.join(home, PATH)
-
-        # Download the model's weights if they don't exist
-        if not os.path.isfile(weight_path):
-            logger.info(f"Downloading Yolo weights from {WEIGHT_URL} to {weight_path}...")
-            try:
-                gdown.download(WEIGHT_URL, weight_path, quiet=False)
-            except Exception as err:
-                raise ValueError(
-                    f"Exception while downloading Yolo weights from {WEIGHT_URL}."
-                    f"You may consider to download it to {weight_path} manually."
-                ) from err
-            logger.info(f"Yolo model is just downloaded to {os.path.basename(weight_path)}")
+        weight_file = weight_utils.download_weights_if_necessary(
+            file_name="yolov8n-face.pt", source_url=WEIGHT_URL
+        )
 
         # Return face_detector
-        return YOLO(weight_path)
+        return YOLO(weight_file)
 
     def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
         """

@@ -5,10 +5,9 @@ from typing import Any, List
 # 3rd party dependencies
 import cv2
 import numpy as np
-import gdown
 
 # project dependencies
-from deepface.commons import folder_utils
+from deepface.commons import weight_utils
 from deepface.models.Detector import Detector, FacialAreaRegion
 from deepface.commons.logger import Logger
@@ -40,16 +39,13 @@ class YuNetClient(Detector):
             raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}")
 
         # pylint: disable=C0301
-        url = "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx"
-        file_name = "face_detection_yunet_2023mar.onnx"
-        home = folder_utils.get_deepface_home()
-        output = os.path.join(home, ".deepface/weights", file_name)
-        if not os.path.isfile(output):
-            logger.info(f"{file_name} will be downloaded...")
-            gdown.download(url, output, quiet=False)
+        weight_file = weight_utils.download_weights_if_necessary(
+            file_name="face_detection_yunet_2023mar.onnx",
+            source_url="https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx",
+        )
 
         try:
-            face_detector = cv2.FaceDetectorYN_create(output, "", (0, 0))
+            face_detector = cv2.FaceDetectorYN_create(weight_file, "", (0, 0))
         except Exception as err:
             raise ValueError(
                 "Exception while calling opencv.FaceDetectorYN_create module."

@@ -1,6 +1,5 @@
-import os
-import gdown
-from deepface.commons import package_utils, folder_utils
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 
 from deepface.commons.logger import Logger
@@ -79,20 +78,13 @@ def load_model(
     model = Model(inputs, embedding, name=base_model.name)
 
     # ---------------------------------------
-    # check the availability of pre-trained weights
-
-    home = folder_utils.get_deepface_home()
-
-    file_name = "arcface_weights.h5"
-    output = os.path.join(home, ".deepface/weights", file_name)
-
-    if not os.path.isfile(output):
-        logger.info(f"{file_name} will be downloaded to {output}")
-        gdown.download(url, output, quiet=False)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="arcface_weights.h5", source_url=url
+    )
+
+    model.load_weights(weight_file)
 
     # ---------------------------------------
 
-    model.load_weights(output)
-
     return model

@@ -1,6 +1,5 @@
-import os
-import gdown
-from deepface.commons import package_utils, folder_utils
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -86,13 +85,10 @@ def load_model(
 
     # ---------------------------------
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/deepid_keras_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="deepid_keras_weights.h5", source_url=url
+    )
+
+    model.load_weights(weight_file)
 
     return model

@@ -1,9 +1,11 @@
+# built-in dependencies
 from typing import List
-import os
-import bz2
-import gdown
+
+# 3rd party dependencies
 import numpy as np
-from deepface.commons import folder_utils
+
+# project dependencies
+from deepface.commons import weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -57,7 +59,7 @@ class DlibClient(FacialRecognition):
 class DlibResNet:
     def __init__(self):
 
-        ## this is not a must dependency. do not import it in the global level.
+        # This is not a must dependency. Don't import it in the global level.
         try:
             import dlib
         except ModuleNotFoundError as e:
@@ -66,21 +68,11 @@ class DlibResNet:
                 "Please install using 'pip install dlib' "
             ) from e
 
-        home = folder_utils.get_deepface_home()
-        filename = "dlib_face_recognition_resnet_model_v1.dat"
-        weight_file = os.path.join(home, ".deepface/weights", filename)
-
-        # download pre-trained model if it does not exist
-        if not os.path.isfile(weight_file):
-            logger.info(f"{filename} is going to be downloaded")
-            url = f"http://dlib.net/files/{filename + '.bz2'}"
-            output = weight_file + ".bz2"
-            gdown.download(url, output, quiet=False)
-
-            zipfile = bz2.BZ2File(output)
-            data = zipfile.read()
-            with open(weight_file, "wb") as f:
-                f.write(data)
+        weight_file = weight_utils.download_weights_if_necessary(
+            file_name="dlib_face_recognition_resnet_model_v1.dat",
+            source_url="http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2",
+            compress_type="bz2",
+        )
 
         self.model = dlib.face_recognition_model_v1(weight_file)

@@ -1,6 +1,5 @@
-import os
-import gdown
-from deepface.commons import package_utils, folder_utils
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -1666,20 +1665,10 @@ def load_facenet128d_model(
     """
     model = InceptionResNetV1()
 
-    # -----------------------------------
-
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/facenet_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    # -----------------------------------
-
-    model.load_weights(output)
-
-    # -----------------------------------
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="facenet_weights.h5", source_url=url
+    )
+    model.load_weights(weight_file)
 
     return model
@@ -1695,19 +1684,9 @@ def load_facenet512d_model(
 
     model = InceptionResNetV1(dimension=512)
 
-    # -------------------------
-
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/facenet512_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    # -------------------------
-
-    model.load_weights(output)
-
-    # -------------------------
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="facenet512_weights.h5", source_url=url
+    )
+    model.load_weights(weight_file)
 
     return model

@@ -1,7 +1,5 @@
-import os
-import zipfile
-import gdown
-from deepface.commons import package_utils, folder_utils
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -84,20 +82,11 @@ def load_model(
 
     # ---------------------------------
 
-    home = folder_utils.get_deepface_home()
-    filename = "VGGFace2_DeepFace_weights_val-0.9034.h5"
-    output = os.path.join(home, ".deepface/weights", filename)
-
-    if not os.path.isfile(output):
-        logger.info(f"{filename} will be downloaded...")
-        output_zipped = output + ".zip"
-        gdown.download(url, output_zipped, quiet=False)
-
-        # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
-        with zipfile.ZipFile(output_zipped, "r") as zip_ref:
-            zip_ref.extractall(os.path.join(home, ".deepface/weights"))
-
-    base_model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="VGGFace2_DeepFace_weights_val-0.9034.h5", source_url=url, compress_type="zip"
+    )
+
+    base_model.load_weights(weight_file)
 
     # drop F8 and D0. F7 is the representation layer.
     deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)

@@ -1,12 +1,8 @@
-# built-in dependencies
-import os
-
 # 3rd party dependencies
-import gdown
 import tensorflow as tf
 
 # project dependencies
-from deepface.commons import package_utils, folder_utils
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -74,15 +70,11 @@ class GhostFaceNetClient(FacialRecognition):
 def load_model():
     model = GhostFaceNetV1()
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/ghostfacenet_v1.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"Pre-trained weights is downloaded from {PRETRAINED_WEIGHTS} to {output}")
-        gdown.download(PRETRAINED_WEIGHTS, output, quiet=False)
-        logger.info(f"Pre-trained weights is just downloaded to {output}")
-
-    model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="ghostfacenet_v1.h5", source_url=PRETRAINED_WEIGHTS
+    )
+
+    model.load_weights(weight_file)
 
     return model

@@ -1,7 +1,8 @@
-import os
-import gdown
+# 3rd party dependencies
 import tensorflow as tf
-from deepface.commons import package_utils, folder_utils
+
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -380,16 +381,11 @@ def load_model(
 
     # -----------------------------------
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/openface_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    # -----------------------------------
-
-    model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="openface_weights.h5", source_url=url
+    )
+
+    model.load_weights(weight_file)
 
     # -----------------------------------

@@ -1,14 +1,12 @@
 # built-in dependencies
-import os
 from typing import Any, List
 
 # 3rd party dependencies
 import numpy as np
 import cv2 as cv
-import gdown
 
 # project dependencies
-from deepface.commons import folder_utils
+from deepface.commons import weight_utils
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -55,14 +53,11 @@ def load_model(
     Construct SFace model, download its weights and load
     """
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/face_recognition_sface_2021dec.onnx")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} weights will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    model = SFaceWrapper(model_path=output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="face_recognition_sface_2021dec.onnx", source_url=url
+    )
+
+    model = SFaceWrapper(model_path=weight_file)
 
     return model

@@ -1,8 +1,11 @@
+# built-in dependencies
 from typing import List
-import os
-import gdown
+
+# 3rd party dependencies
 import numpy as np
-from deepface.commons import package_utils, folder_utils
+
+# project dependencies
+from deepface.commons import package_utils, weight_utils
 from deepface.modules import verification
 from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
@@ -133,14 +136,11 @@ def load_model(
 
     model = base_model()
 
-    home = folder_utils.get_deepface_home()
-    output = os.path.join(home, ".deepface/weights/vgg_face_weights.h5")
-
-    if not os.path.isfile(output):
-        logger.info(f"{os.path.basename(output)} will be downloaded...")
-        gdown.download(url, output, quiet=False)
-
-    model.load_weights(output)
+    weight_file = weight_utils.download_weights_if_necessary(
+        file_name="vgg_face_weights.h5", source_url=url
+    )
+
+    model.load_weights(weight_file)
 
     # 2622d dimensional model
     # vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)

@@ -1,6 +1,3 @@
-# Minivision's Silent-Face-Anti-Spoofing Repo licensed under Apache License 2.0
-# Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
-
 # built-in dependencies
 from typing import Union
@@ -9,15 +6,18 @@ import cv2
 import numpy as np
 
 # project dependencies
-from deepface.commons import folder_utils, file_utils
+from deepface.commons import weight_utils
 from deepface.commons.logger import Logger
 
 logger = Logger()
 
-# pylint: disable=line-too-long, too-few-public-methods
+# pylint: disable=line-too-long, too-few-public-methods, nested-min-max
 class Fasnet:
     """
     Mini Face Anti Spoofing Net Library from repo: github.com/minivision-ai/Silent-Face-Anti-Spoofing
+
+    Minivision's Silent-Face-Anti-Spoofing Repo licensed under Apache License 2.0
+    Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
     """
 
     def __init__(self):
@@ -29,21 +29,18 @@ class Fasnet:
                 "You must install torch with `pip install pytorch` command to use face anti spoofing module"
             ) from err
 
-        home = folder_utils.get_deepface_home()
-
         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         self.device = device
 
         # download pre-trained models if not installed yet
-        file_utils.download_external_file(
+        first_model_weight_file = weight_utils.download_weights_if_necessary(
             file_name="2.7_80x80_MiniFASNetV2.pth",
-            exact_file_path=f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth",
-            url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
+            source_url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
         )
 
-        file_utils.download_external_file(
+        second_model_weight_file = weight_utils.download_weights_if_necessary(
             file_name="4_0_0_80x80_MiniFASNetV1SE.pth",
-            exact_file_path=f"{home}/.deepface/weights/4_0_0_80x80_MiniFASNetV1SE.pth",
-            url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth",
+            source_url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth",
        )
 
         # guarantees Fasnet imported and torch installed
@@ -56,9 +53,7 @@ class Fasnet:
         second_model = FasNetBackbone.MiniFASNetV1SE(conv6_kernel=(5, 5)).to(device)
 
         # load model weight for first model
-        state_dict = torch.load(
-            f"{home}/.deepface/weights/2.7_80x80_MiniFASNetV2.pth", map_location=device
-        )
+        state_dict = torch.load(first_model_weight_file, map_location=device)
         keys = iter(state_dict)
         first_layer_name = keys.__next__()
@@ -74,9 +69,7 @@ class Fasnet:
         first_model.load_state_dict(state_dict)
 
         # load model weight for second model
-        state_dict = torch.load(
-            f"{home}/.deepface/weights/4_0_0_80x80_MiniFASNetV1SE.pth", map_location=device
-        )
+        state_dict = torch.load(second_model_weight_file, map_location=device)
         keys = iter(state_dict)
         first_layer_name = keys.__next__()
@@ -191,7 +184,6 @@ def _get_new_box(src_w, src_h, bbox, scale):
     y = bbox[1]
     box_w = bbox[2]
     box_h = bbox[3]
-    # pylint: disable=nested-min-max
     scale = min((src_h - 1) / box_h, min((src_w - 1) / box_w, scale))
     new_width = box_w * scale
    new_height = box_h * scale