getting rid of target_size everywhere

Sefik Ilkin Serengil 2024-04-07 18:23:28 +01:00
parent 1078be9f12
commit ae5d5b967a
6 changed files with 11 additions and 8 deletions

@@ -237,7 +237,6 @@ demographies = DeepFace.analyze(img_path = "img4.jpg",
 #face detection and alignment
 face_objs = DeepFace.extract_faces(img_path = "img.jpg",
-        target_size = (224, 224),
         detector_backend = backends[4]
 )
 ```
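
With `target_size` dropped from the public `extract_faces` signature, callers that need a fixed input size resize the returned face themselves. A minimal sketch of the migration, mirroring the README change above; the (224, 224) size is just the example value used there:

```python
import cv2
from deepface import DeepFace

# before this change: extract_faces(img_path="img.jpg", target_size=(224, 224), ...)
# returned an already-resized face; the argument no longer exists.

# after this change: extract at detection size, then resize explicitly
face = DeepFace.extract_faces(img_path="img.jpg")[0]["face"]
face = cv2.resize(face, (224, 224))  # match the recognition model's expected input yourself
```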

@@ -10,6 +10,7 @@ os.environ["TF_USE_LEGACY_KERAS"] = "1"
 # pylint: disable=wrong-import-position
 # 3rd party dependencies
+import cv2
 import numpy as np
 import pandas as pd
 import tensorflow as tf
@@ -532,7 +533,6 @@ def detectFace(
     logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
     face_objs = extract_faces(
         img_path=img_path,
-        target_size=target_size,
         detector_backend=detector_backend,
         enforce_detection=enforce_detection,
         align=align,
@@ -541,4 +541,5 @@ def detectFace(
     extracted_face = None
     if len(face_objs) > 0:
         extracted_face = face_objs[0]["face"]
+        extracted_face = cv2.resize(extracted_face, target_size)
     return extracted_face
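
The deprecated `detectFace` wrapper keeps its `target_size` parameter for backward compatibility: it now calls `extract_faces` without it and applies `cv2.resize` to the first detected face. A hedged usage sketch, assuming the wrapper's signature is otherwise unchanged:

```python
from deepface import DeepFace

# detectFace is deprecated but still honours target_size; the resize now happens
# inside the wrapper via cv2.resize rather than inside extract_faces.
face = DeepFace.detectFace(img_path="img.jpg", target_size=(224, 224))
print(face.shape)  # expected to stay (224, 224, 3), as before the refactor
```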

@@ -321,8 +321,6 @@ def __find_bulk_embeddings(
         model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
             OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-        target_size (tuple): expected input shape of facial recognition model
         detector_backend (str): face detector model name
         enforce_detection (bool): set this to False if you

@@ -10,7 +10,6 @@ import cv2
 # project dependencies
 from deepface import DeepFace
-from deepface.models.FacialRecognition import FacialRecognition
 from deepface.commons.logger import Logger
 logger = Logger(module="commons.realtime")

@@ -1,5 +1,7 @@
 import matplotlib.pyplot as plt
 import numpy as np
+import cv2
 from deepface import DeepFace
 from deepface.modules import verification
 from deepface.models.FacialRecognition import FacialRecognition
@@ -21,11 +23,13 @@ logger.info(f"target_size: {target_size}")
 # ----------------------------------------------
 # load images and find embeddings
-img1 = DeepFace.extract_faces(img_path="dataset/img1.jpg", target_size=target_size)[0]["face"]
+img1 = DeepFace.extract_faces(img_path="dataset/img1.jpg")[0]["face"]
+img1 = cv2.resize(img1, target_size)
 img1 = np.expand_dims(img1, axis=0)  # to (1, 224, 224, 3)
 img1_representation = model.forward(img1)
-img2 = DeepFace.extract_faces(img_path="dataset/img3.jpg", target_size=target_size)[0]["face"]
+img2 = DeepFace.extract_faces(img_path="dataset/img3.jpg")[0]["face"]
+img2 = cv2.resize(img2, target_size)
 img2 = np.expand_dims(img2, axis=0)
 img2_representation = model.forward(img2)
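
In this test script the target size no longer flows into `extract_faces`; it comes from the loaded recognition model and is applied with `cv2.resize` before batching. A short sketch of where `target_size` originates, assuming `build_model` returns a `FacialRecognition` instance exposing `input_shape` (as the `logger.info` line in the hunk header suggests):

```python
from deepface import DeepFace
from deepface.models.FacialRecognition import FacialRecognition

# assumption: the recognition model reports its own expected input size
model: FacialRecognition = DeepFace.build_model("VGG-Face")
target_size = model.input_shape  # e.g. (224, 224) for VGG-Face
```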

@@ -7,9 +7,11 @@ img_path = "dataset/img1.jpg"
 img = cv2.imread(img_path)
 overlay_img_path = "dataset/img6.jpg"
-face_objs = DeepFace.extract_faces(overlay_img_path, target_size=(112, 112))
+face_objs = DeepFace.extract_faces(overlay_img_path)
 overlay_img = face_objs[0]["face"][:, :, ::-1] * 255
+overlay_img = cv2.resize(overlay_img, (112, 112))
 raw_img = img.copy()
 demographies = DeepFace.analyze(img_path=img_path, actions=("age", "gender", "emotion"))
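
The overlay face likewise loses its `target_size` shortcut and is resized explicitly after extraction. A short sketch of the same post-processing with the intermediate steps commented; the final uint8 cast is an assumption for handing the array to OpenCV blending or drawing calls:

```python
import cv2
import numpy as np
from deepface import DeepFace

face_objs = DeepFace.extract_faces("dataset/img6.jpg")
overlay_img = face_objs[0]["face"][:, :, ::-1] * 255  # flip channel order and scale 0-1 floats to 0-255
overlay_img = cv2.resize(overlay_img, (112, 112))      # explicit resize replaces target_size=(112, 112)
overlay_img = overlay_img.astype(np.uint8)             # assumption: cast before OpenCV blending/drawing
```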