batched detection

This commit is contained in:
galthran-wq 2025-02-12 09:43:18 +00:00
parent 72e82f0605
commit f4d18a70c0

View File

@ -19,7 +19,7 @@ logger = Logger()
def extract_faces(
img_path: Union[str, np.ndarray, IO[bytes]],
img_path: Union[List[Union[str, np.ndarray, IO[bytes]]], str, np.ndarray, IO[bytes]],
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
@ -31,10 +31,10 @@ def extract_faces(
max_faces: Optional[int] = None,
) -> List[Dict[str, Any]]:
"""
Extract faces from a given image
Extract faces from a given image or list of images
Args:
img_path (str or np.ndarray or IO[bytes]): Path to the first image. Accepts exact image path
img_path (List[str or np.ndarray or IO[bytes]] or str or np.ndarray or IO[bytes]): Path(s) to the image(s). Accepts exact image path
as a string, numpy array (BGR), a file object that supports at least `.read` and is
opened in binary mode, or base64 encoded images.
@ -80,30 +80,39 @@ def extract_faces(
just available in the result only if anti_spoofing is set to True in input arguments.
"""
resp_objs = []
if not isinstance(img_path, list):
img_path = [img_path]
all_images = []
img_names = []
for single_img_path in img_path:
# img might be path, base64 or numpy array. Convert it to numpy whatever it is.
img, img_name = image_utils.load_image(img_path)
img, img_name = image_utils.load_image(single_img_path)
if img is None:
raise ValueError(f"Exception while loading {img_name}")
height, width, _ = img.shape
all_images.append(img)
img_names.append(img_name)
base_region = FacialAreaRegion(x=0, y=0, w=width, h=height, confidence=0)
if detector_backend == "skip":
face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
else:
face_objs = detect_faces(
# Run detect_faces for all images at once
all_face_objs = detect_faces(
detector_backend=detector_backend,
img=img,
img=all_images,
align=align,
expand_percentage=expand_percentage,
max_faces=max_faces,
)
# in case of no face found
if len(all_images) == 1:
all_face_objs = [all_face_objs]
all_resp_objs = []
for img, img_name, face_objs in zip(all_images, img_names, all_face_objs):
height, width, _ = img.shape
if len(face_objs) == 0 and enforce_detection is True:
if img_name is not None:
raise ValueError(
@ -118,6 +127,7 @@ def extract_faces(
)
if len(face_objs) == 0 and enforce_detection is False:
base_region = FacialAreaRegion(x=0, y=0, w=width, h=height, confidence=0)
face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
for face_obj in face_objs:
@ -178,37 +188,32 @@ def extract_faces(
resp_obj["is_real"] = is_real
resp_obj["antispoof_score"] = antispoof_score
resp_objs.append(resp_obj)
all_resp_objs.append(resp_obj)
if len(resp_objs) == 0 and enforce_detection == True:
raise ValueError(
f"Exception while extracting faces from {img_name}."
"Consider to set enforce_detection arg to False."
)
return resp_objs
return all_resp_objs
def detect_faces(
detector_backend: str,
img: np.ndarray,
img: Union[np.ndarray, List[np.ndarray]],
align: bool = True,
expand_percentage: int = 0,
max_faces: Optional[int] = None,
) -> List[DetectedFace]:
) -> Union[List[List[DetectedFace]], List[DetectedFace]]:
"""
Detect face(s) from a given image
Detect face(s) from a given image or list of images
Args:
detector_backend (str): detector name
img (np.ndarray): pre-loaded image
img (np.ndarray or List[np.ndarray]): pre-loaded image or list of images
align (bool): enable or disable alignment after detection
expand_percentage (int): expand detected facial area with a percentage (default is 0).
Returns:
results (List[DetectedFace]): A list of DetectedFace objects
results (Union[List[List[DetectedFace]], List[DetectedFace]]):
A list of lists of DetectedFace objects or a list of DetectedFace objects
where each object contains:
- img (np.ndarray): The detected face as a NumPy array.
@ -219,10 +224,16 @@ def detect_faces(
- confidence (float): The confidence score associated with the detected face.
"""
height, width, _ = img.shape
if not isinstance(img, list):
img = [img]
face_detector: Detector = modeling.build_model(
task="face_detector", model_name=detector_backend
)
all_detected_faces = []
for single_img in img:
height, width, _ = single_img.shape
# validate expand percentage score
if expand_percentage < 0:
@ -237,8 +248,8 @@ def detect_faces(
height_border = int(0.5 * height)
width_border = int(0.5 * width)
if align is True:
img = cv2.copyMakeBorder(
img,
single_img = cv2.copyMakeBorder(
single_img,
height_border,
height_border,
width_border,
@ -248,17 +259,17 @@ def detect_faces(
)
# find facial areas of given image
facial_areas = face_detector.detect_faces(img)
facial_areas = face_detector.detect_faces(single_img)
if max_faces is not None and max_faces < len(facial_areas):
facial_areas = nlargest(
max_faces, facial_areas, key=lambda facial_area: facial_area.w * facial_area.h
)
return [
detected_faces = [
extract_face(
facial_area=facial_area,
img=img,
img=single_img,
align=align,
expand_percentage=expand_percentage,
width_border=width_border,
@ -267,6 +278,12 @@ def detect_faces(
for facial_area in facial_areas
]
all_detected_faces.append(detected_faces)
if len(all_detected_faces) == 1:
return all_detected_faces[0]
return all_detected_faces
def extract_face(
facial_area: FacialAreaRegion,