Mirror of https://github.com/serengil/deepface.git (synced 2025-06-07 12:05:22 +00:00)
solved pr comments: restore comments and used one line if
This commit is contained in:
parent 2f9ef19e09
commit c513b26d6d
@@ -450,7 +450,7 @@ def stream(
     time_threshold: int = 5,
     frame_threshold: int = 5,
     anti_spoofing: bool = False,
-    output_path: Optional[str] = None,  # New parameter
+    output_path: Optional[str] = None,
 ) -> None:
     """
     Run real time face recognition and facial attribute analysis
@@ -480,7 +480,7 @@ def stream(
 
         anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
 
-        output_path (str): Path to save the output video. If None, no video is saved.
+        output_path (str): Path to save the output video. If None, no video is saved (default is None).
 
     Returns:
         None
@@ -499,7 +499,7 @@ def stream(
         time_threshold=time_threshold,
         frame_threshold=frame_threshold,
         anti_spoofing=anti_spoofing,
-        output_path=output_path,  # Pass the output_path to analysis
+        output_path=output_path,
     )
 
 
@@ -34,29 +34,45 @@ def analysis(
     time_threshold=5,
     frame_threshold=5,
     anti_spoofing: bool = False,
-    output_path: Optional[str] = None,  # New parameter
+    output_path: Optional[str] = None,
 ):
     """
-    Run real-time face recognition and facial attribute analysis, with optional video output.
+    Run real time face recognition and facial attribute analysis
 
     Args:
-        db_path (str): Path to the folder containing image files.
-        model_name (str): Model for face recognition.
-        detector_backend (str): Face detector backend.
-        distance_metric (str): Metric for measuring similarity.
-        enable_face_analysis (bool): Flag to enable face analysis.
-        source (Any): The source for the video stream (camera index or video file path).
-        time_threshold (int): Time threshold (in seconds) for face recognition.
-        frame_threshold (int): Frame threshold for face recognition.
-        anti_spoofing (bool): Flag to enable anti-spoofing.
-        output_path (str): Path to save the output video. If None, no video is saved.
+        db_path (string): Path to the folder containing image files. All detected faces
+            in the database will be considered in the decision-making process.
+
+        model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
+            OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face)
+
+        detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
+            'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
+            'centerface' or 'skip' (default is opencv).
+
+        distance_metric (string): Metric for measuring similarity. Options: 'cosine',
+            'euclidean', 'euclidean_l2' (default is cosine).
+
+        enable_face_analysis (bool): Flag to enable face analysis (default is True).
+
+        source (Any): The source for the video stream (default is 0, which represents the
+            default camera).
+
+        time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
+
+        frame_threshold (int): The frame threshold for face recognition (default is 5).
+
+        anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
+        output_path (str): Path to save the output video. If None, no video is saved (default is None).
 
     Returns:
         None
     """
-    # Initialize models
+    # initialize models
     build_demography_models(enable_face_analysis=enable_face_analysis)
     build_facial_recognition_model(model_name=model_name)
+    # call a dummy find function for db_path once to create embeddings before starting webcam
     _ = search_identity(
         detected_face=np.zeros([224, 224, 3]),
         db_path=db_path,
@@ -77,9 +93,11 @@ def analysis(
     fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # Codec for output file
 
     # Initialize video writer if output_path is provided
-    video_writer = None
-    if output_path:
-        video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+    video_writer = (
+        cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
+        if output_path
+        else None
+    )
 
     freezed_img = None
     freeze = False
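Note: the "one line if" referenced in the commit message is Python's conditional expression. A minimal standalone sketch of the same pattern follows; the width, height, fps, and file name here are hypothetical placeholders, not values taken from this diff:

    import cv2

    # Hypothetical values for illustration; in analysis() these come from the capture device.
    width, height, fps = 640, 480, 30.0
    output_path = "demo_output.mp4"  # set to None to skip writing a video

    # Conditional expression: build the writer only when an output path is given,
    # otherwise keep video_writer as None.
    video_writer = (
        cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
        if output_path
        else None
    )

    if video_writer is not None:
        video_writer.release()  # release the writer when done

The point of the expression is that video_writer stays None when no output_path is given, so any later writes can be guarded with a single None check.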
@@ -91,6 +109,8 @@ def analysis(
         if not has_frame:
             break
 
+        # we are adding some figures into img such as identified facial image, age, gender
+        # that is why, we need raw image itself to make analysis
         raw_img = img.copy()
         faces_coordinates = []
 
@@ -98,6 +118,9 @@ def analysis(
         faces_coordinates = grab_facial_areas(
             img=img, detector_backend=detector_backend, anti_spoofing=anti_spoofing
         )
+
+        # we will pass img to analyze modules (identity, demography) and add some illustrations
+        # that is why, we will not be able to extract detected face from img clearly
         detected_faces = extract_facial_areas(img=img, faces_coordinates=faces_coordinates)
         img = highlight_facial_areas(img=img, faces_coordinates=faces_coordinates)
         img = countdown_to_freeze(
@@ -111,15 +134,19 @@ def analysis(
         freeze = num_frames_with_faces > 0 and num_frames_with_faces % frame_threshold == 0
 
         if freeze:
+            # add analyze results into img - derive from raw_img
             img = highlight_facial_areas(
                 img=raw_img, faces_coordinates=faces_coordinates, anti_spoofing=anti_spoofing
             )
+
+            # age, gender and emotion analysis
             img = perform_demography_analysis(
                 enable_face_analysis=enable_face_analysis,
                 img=raw_img,
                 faces_coordinates=faces_coordinates,
                 detected_faces=detected_faces,
             )
+            # facial recogntion analysis
            img = perform_facial_recognition(
                 img=img,
                 faces_coordinates=faces_coordinates,
@@ -129,13 +156,18 @@ def analysis(
                 distance_metric=distance_metric,
                 model_name=model_name,
             )
+
+            # freeze the img after analysis
             freezed_img = img.copy()
+
+            # start counter for freezing
             tic = time.time()
-            logger.info("Image frozen for analysis")
+            logger.info("freezed")
 
         elif freeze and time.time() - tic > time_threshold:
             freeze = False
             freezed_img = None
+            # reset counter for freezing
             tic = time.time()
             logger.info("Freeze released")
 
@@ -222,10 +254,10 @@ def search_identity(
     # detected face is coming from parent, safe to access 1st index
     df = dfs[0]
 
-    if df.shape[0] == 0:
+    if df.shape[0] == 0:  # type: ignore
         return None, None
 
-    candidate = df.iloc[0]
+    candidate = df.iloc[0]  # type: ignore
     target_path = candidate["identity"]
     logger.info(f"Hello, {target_path}")
 
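For reference, a minimal usage sketch of the new output_path parameter on DeepFace.stream, assuming this branch of the code; the database folder and output file name below are hypothetical placeholders:

    from deepface import DeepFace

    # Opens the default webcam and runs until the stream ends or is interrupted.
    DeepFace.stream(
        db_path="my_face_db",       # hypothetical folder with reference face images
        source=0,                   # default camera
        time_threshold=5,
        frame_threshold=5,
        anti_spoofing=False,
        output_path="session.mp4",  # new parameter: save the annotated stream; None skips saving
    )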