diff --git a/README.md b/README.md
index 822f298..f351c20 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,9 @@
 [![GitHub Sponsors](https://img.shields.io/github/sponsors/serengil?logo=GitHub&color=lightgray)](https://github.com/sponsors/serengil)
 [![Buy Me a Coffee](https://img.shields.io/badge/-buy_me_a%C2%A0coffee-gray?logo=buy-me-a-coffee)](https://buymeacoffee.com/serengil)
+[![Hacker News](https://img.shields.io/badge/dynamic/json?color=orange&label=Hacker%20News&query=score&url=https%3A%2F%2Fhacker-news.firebaseio.com%2Fv0%2Fitem%2F42584896.json&logo=y-combinator)](https://news.ycombinator.com/item?id=42584896)
+[![Product Hunt](https://img.shields.io/badge/Product%20Hunt-%E2%96%B2-orange?logo=producthunt)](https://www.producthunt.com/posts/deepface?embed=true&utm_source=badge-featured&utm_medium=badge&utm_souce=badge-deepface)
+
@@ -392,7 +395,7 @@ Before creating a PR, you should run the unit tests and linting locally by runni
 There are many ways to support a project - starring⭐️ the GitHub repo is just one 🙏
 
-If you do like this work, then you can support it financially on [Patreon](https://www.patreon.com/serengil?repo=deepface), [GitHub Sponsors](https://github.com/sponsors/serengil) or [Buy Me a Coffee](https://buymeacoffee.com/serengil).
+If you do like this work, then you can support it financially on [Patreon](https://www.patreon.com/serengil?repo=deepface), [GitHub Sponsors](https://github.com/sponsors/serengil) or [Buy Me a Coffee](https://buymeacoffee.com/serengil). Also, your company's logo will be shown on the README on GitHub and PyPI if you become a sponsor in the gold, silver or bronze tiers.
@@ -402,7 +405,19 @@ If you do like this work, then you can support it financially on [Patreon](https
 
 
 
-Also, your company's logo will be shown on README on GitHub and PyPI if you become a sponsor in gold, silver or bronze tiers.
+Additionally, you can help us reach a wider audience by upvoting our posts on Hacker News and Product Hunt.
+
+<div>
+  <a href="https://news.ycombinator.com/item?id=42584896">
+    <img alt="Featured on Hacker News">
+  </a>
+
+  <a href="https://www.producthunt.com/posts/deepface?embed=true&utm_source=badge-featured&utm_medium=badge&utm_souce=badge-deepface">
+    <img alt="DeepFace - A Lightweight Deep Face Recognition Library for Python | Product Hunt">
+  </a>
+
+</div>
 
 ## Citation
 
diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index 6eb31ac..f8930e5 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -68,18 +68,18 @@ def build_model(model_name: str, task: str = "facial_recognition") -> Any:
 
 
 def verify(
-    img1_path: Union[str, np.ndarray, List[float]],
-    img2_path: Union[str, np.ndarray, List[float]],
-    model_name: str = "VGG-Face",
-    detector_backend: str = "opencv",
-    distance_metric: str = "cosine",
-    enforce_detection: bool = True,
-    align: bool = True,
-    expand_percentage: int = 0,
-    normalization: str = "base",
-    silent: bool = False,
-    threshold: Optional[float] = None,
-    anti_spoofing: bool = False,
+    img1_path: Union[str, np.ndarray, List[float]],
+    img2_path: Union[str, np.ndarray, List[float]],
+    model_name: str = "VGG-Face",
+    detector_backend: str = "opencv",
+    distance_metric: str = "cosine",
+    enforce_detection: bool = True,
+    align: bool = True,
+    expand_percentage: int = 0,
+    normalization: str = "base",
+    silent: bool = False,
+    threshold: Optional[float] = None,
+    anti_spoofing: bool = False,
 ) -> Dict[str, Any]:
     """
     Verify if an image pair represents the same person or different persons.
@@ -164,14 +164,14 @@ def verify(
 
 
 def analyze(
-    img_path: Union[str, np.ndarray],
-    actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
-    enforce_detection: bool = True,
-    detector_backend: str = "opencv",
-    align: bool = True,
-    expand_percentage: int = 0,
-    silent: bool = False,
-    anti_spoofing: bool = False,
+    img_path: Union[str, np.ndarray],
+    actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
+    enforce_detection: bool = True,
+    detector_backend: str = "opencv",
+    align: bool = True,
+    expand_percentage: int = 0,
+    silent: bool = False,
+    anti_spoofing: bool = False,
 ) -> List[Dict[str, Any]]:
     """
     Analyze facial attributes such as age, gender, emotion, and race in the provided image.
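Review note: the `verify` and `analyze` hunks above re-emit the parameter lists unchanged, so the change is whitespace-only and existing callers are unaffected. For a quick sanity check of the touched API surface, a minimal smoke test (the image paths are placeholders):

```python
from deepface import DeepFace

# Pairwise verification with the defaults shown above (VGG-Face, opencv, cosine).
result = DeepFace.verify(img1_path="img1.jpg", img2_path="img2.jpg")
print(result["verified"], result["distance"], result["threshold"])

# Attribute analysis returns one dict per detected face; a subset of actions is allowed.
for face in DeepFace.analyze(img_path="img1.jpg", actions=("emotion", "age")):
    print(face["age"], face["dominant_emotion"])
```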
@@ -263,20 +263,20 @@ def analyze(
 
 
 def find(
-    img_path: Union[str, np.ndarray],
-    db_path: str,
-    model_name: str = "VGG-Face",
-    distance_metric: str = "cosine",
-    enforce_detection: bool = True,
-    detector_backend: str = "opencv",
-    align: bool = True,
-    expand_percentage: int = 0,
-    threshold: Optional[float] = None,
-    normalization: str = "base",
-    silent: bool = False,
-    refresh_database: bool = True,
-    anti_spoofing: bool = False,
-    batched: bool = False,
+    img_path: Union[str, np.ndarray],
+    db_path: str,
+    model_name: str = "VGG-Face",
+    distance_metric: str = "cosine",
+    enforce_detection: bool = True,
+    detector_backend: str = "opencv",
+    align: bool = True,
+    expand_percentage: int = 0,
+    threshold: Optional[float] = None,
+    normalization: str = "base",
+    silent: bool = False,
+    refresh_database: bool = True,
+    anti_spoofing: bool = False,
+    batched: bool = False,
 ) -> Union[List[pd.DataFrame], List[List[Dict[str, Any]]]]:
     """
     Identify individuals in a database
@@ -369,15 +369,15 @@ def find(
 
 
 def represent(
-    img_path: Union[str, np.ndarray],
-    model_name: str = "VGG-Face",
-    enforce_detection: bool = True,
-    detector_backend: str = "opencv",
-    align: bool = True,
-    expand_percentage: int = 0,
-    normalization: str = "base",
-    anti_spoofing: bool = False,
-    max_faces: Optional[int] = None,
+    img_path: Union[str, np.ndarray],
+    model_name: str = "VGG-Face",
+    enforce_detection: bool = True,
+    detector_backend: str = "opencv",
+    align: bool = True,
+    expand_percentage: int = 0,
+    normalization: str = "base",
+    anti_spoofing: bool = False,
+    max_faces: Optional[int] = None,
 ) -> List[Dict[str, Any]]:
     """
     Represent facial images as multi-dimensional vector embeddings.
@@ -441,15 +441,16 @@ def represent(
 
 
 def stream(
-    db_path: str = "",
-    model_name: str = "VGG-Face",
-    detector_backend: str = "opencv",
-    distance_metric: str = "cosine",
-    enable_face_analysis: bool = True,
-    source: Any = 0,
-    time_threshold: int = 5,
-    frame_threshold: int = 5,
-    anti_spoofing: bool = False,
+    db_path: str = "",
+    model_name: str = "VGG-Face",
+    detector_backend: str = "opencv",
+    distance_metric: str = "cosine",
+    enable_face_analysis: bool = True,
+    source: Any = 0,
+    time_threshold: int = 5,
+    frame_threshold: int = 5,
+    anti_spoofing: bool = False,
+    output_path: Optional[str] = None,
 ) -> None:
     """
     Run real time face recognition and facial attribute analysis
@@ -478,6 +479,10 @@ def stream(
         frame_threshold (int): The frame threshold for face recognition (default is 5).
 
         anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
+
+        output_path (str): Path to save the output video.
+            If None, no video is saved (default is None).
+
     Returns:
         None
     """
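With `output_path` wired into `stream`, callers can opt into saving the annotated stream. A hedged usage sketch, in which `my_db` and the output file are placeholder paths and all other arguments keep their documented defaults:

```python
from deepface import DeepFace

# Run real-time recognition against a folder of known faces and, new in this
# patch, persist the annotated frames to disk. Paths below are placeholders.
DeepFace.stream(
    db_path="my_db",                   # folder containing facial images
    source=0,                          # webcam index, or a video file path
    output_path="outputs/stream.mp4",  # None (the default) keeps the old behavior
)
```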
@@ -495,19 +500,20 @@ def stream(
         time_threshold=time_threshold,
         frame_threshold=frame_threshold,
         anti_spoofing=anti_spoofing,
+        output_path=output_path,
     )
 
 
 def extract_faces(
-    img_path: Union[str, np.ndarray],
-    detector_backend: str = "opencv",
-    enforce_detection: bool = True,
-    align: bool = True,
-    expand_percentage: int = 0,
-    grayscale: bool = False,
-    color_face: str = "rgb",
-    normalize_face: bool = True,
-    anti_spoofing: bool = False,
+    img_path: Union[str, np.ndarray],
+    detector_backend: str = "opencv",
+    enforce_detection: bool = True,
+    align: bool = True,
+    expand_percentage: int = 0,
+    grayscale: bool = False,
+    color_face: str = "rgb",
+    normalize_face: bool = True,
+    anti_spoofing: bool = False,
 ) -> List[Dict[str, Any]]:
     """
     Extract faces from a given image
@@ -584,11 +590,11 @@ def cli() -> None:
 
 
 def detectFace(
-    img_path: Union[str, np.ndarray],
-    target_size: tuple = (224, 224),
-    detector_backend: str = "opencv",
-    enforce_detection: bool = True,
-    align: bool = True,
+    img_path: Union[str, np.ndarray],
+    target_size: tuple = (224, 224),
+    detector_backend: str = "opencv",
+    enforce_detection: bool = True,
+    align: bool = True,
 ) -> Union[np.ndarray, None]:
     """
     Deprecated face detection function. Use extract_faces for same functionality.
diff --git a/deepface/modules/recognition.py b/deepface/modules/recognition.py
index 1edb430..f153132 100644
--- a/deepface/modules/recognition.py
+++ b/deepface/modules/recognition.py
@@ -149,7 +149,7 @@ def find(
     # Ensure the proper pickle file exists
     if not os.path.exists(datastore_path):
         with open(datastore_path, "wb") as f:
-            pickle.dump([], f)
+            pickle.dump([], f, pickle.HIGHEST_PROTOCOL)
 
     # Load the representations from the pickle file
     with open(datastore_path, "rb") as f:
@@ -232,7 +232,7 @@ def find(
 
     if must_save_pickle:
         with open(datastore_path, "wb") as f:
-            pickle.dump(representations, f)
+            pickle.dump(representations, f, pickle.HIGHEST_PROTOCOL)
 
     if not silent:
         logger.info(f"There are now {len(representations)} representations in {file_name}")
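Context for the `recognition.py` change: `pickle.HIGHEST_PROTOCOL` opts into the newest pickle format, which is generally faster and more compact than the interpreter default (protocol 4 versus 5 on current CPython), at the cost that protocol 5 pickles cannot be read by Python older than 3.8. A quick illustration; the payload below only loosely mimics a representation entry, not the exact schema:

```python
import pickle

# Stand-in payload; real entries hold identity paths, embeddings,
# and facial-area coordinates.
payload = [{"identity": "img1.jpg", "embedding": [0.1] * 4096}]

default_bytes = pickle.dumps(payload)                           # interpreter default (protocol 4 on CPython 3.8+)
highest_bytes = pickle.dumps(payload, pickle.HIGHEST_PROTOCOL)  # protocol 5 on CPython 3.8+
print(len(default_bytes), len(highest_bytes))
```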
diff --git a/deepface/modules/streaming.py b/deepface/modules/streaming.py
index cc44783..e461e56 100644
--- a/deepface/modules/streaming.py
+++ b/deepface/modules/streaming.py
@@ -22,6 +22,7 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 IDENTIFIED_IMG_SIZE = 112
 
 TEXT_COLOR = (255, 255, 255)
 
+# pylint: disable=unused-variable
 def analysis(
     db_path: str,
@@ -33,6 +34,7 @@ def analysis(
     time_threshold=5,
     frame_threshold=5,
     anti_spoofing: bool = False,
+    output_path: Optional[str] = None,
 ):
     """
     Run real time face recognition and facial attribute analysis
@@ -62,6 +64,8 @@ def analysis(
 
         anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
 
+        output_path (str): Path to save the output video.
+            If None, no video is saved (default is None).
 
     Returns:
         None
     """
@@ -77,12 +81,31 @@ def analysis(
         model_name=model_name,
     )
 
+    cap = cv2.VideoCapture(source if isinstance(source, str) else int(source))
+    if not cap.isOpened():
+        logger.error(f"Cannot open video source: {source}")
+        return
+
+    # Get video properties
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # codec for the output file
+    # Ensure the output directory exists if output_path is provided
+    if output_path and os.path.dirname(output_path):
+        os.makedirs(os.path.dirname(output_path), exist_ok=True)
+    # Initialize the video writer only when output_path is provided
+    video_writer = (
+        cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+        if output_path
+        else None
+    )
+
     freezed_img = None
     freeze = False
     num_frames_with_faces = 0
     tic = time.time()
 
-    cap = cv2.VideoCapture(source)  # webcam
     while True:
         has_frame, img = cap.read()
         if not has_frame:
@@ -91,9 +114,9 @@ def analysis(
         # we are adding some figures into img such as identified facial image, age, gender
         # that is why, we need raw image itself to make analysis
         raw_img = img.copy()
-
         faces_coordinates = []
-        if freeze is False:
+
+        if not freeze:
             faces_coordinates = grab_facial_areas(
                 img=img, detector_backend=detector_backend, anti_spoofing=anti_spoofing
             )
@@ -101,7 +124,6 @@ def analysis(
             # we will pass img to analyze modules (identity, demography) and add some illustrations
             # that is why, we will not be able to extract detected face from img clearly
             detected_faces = extract_facial_areas(img=img, faces_coordinates=faces_coordinates)
-
             img = highlight_facial_areas(img=img, faces_coordinates=faces_coordinates)
             img = countdown_to_freeze(
                 img=img,
@@ -111,8 +133,8 @@ def analysis(
             )
 
             num_frames_with_faces = num_frames_with_faces + 1 if len(faces_coordinates) else 0
-
             freeze = num_frames_with_faces > 0 and num_frames_with_faces % frame_threshold == 0
+
             if freeze:
                 # add analyze results into img - derive from raw_img
                 img = highlight_facial_areas(
@@ -144,22 +166,28 @@ def analysis(
                 tic = time.time()
                 logger.info("freezed")
 
-        elif freeze is True and time.time() - tic > time_threshold:
+        elif freeze and time.time() - tic > time_threshold:
             freeze = False
             freezed_img = None
             # reset counter for freezing
             tic = time.time()
-            logger.info("freeze released")
+            logger.info("Freeze released")
 
         freezed_img = countdown_to_release(img=freezed_img, tic=tic, time_threshold=time_threshold)
+        display_img = img if freezed_img is None else freezed_img
 
-        cv2.imshow("img", img if freezed_img is None else freezed_img)
+        # Save the frame to the output video if the writer is initialized
+        if video_writer:
+            video_writer.write(display_img)
 
-        if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
+        cv2.imshow("img", display_img)
+
+        if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
             break
 
-    # kill open cv things
+    # Release resources
     cap.release()
+    if video_writer:
+        video_writer.release()
     cv2.destroyAllWindows()
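One review note on the writer setup: some webcam drivers report `CAP_PROP_FPS` as 0, in which case `cv2.VideoWriter` fails to open and `write()` becomes a silent no-op. Below is a minimal sketch of the capture/write/release lifecycle the patch follows, with a defensive FPS fallback; the file name and the fallback value of 30 are assumptions, not part of the PR:

```python
import cv2

cap = cv2.VideoCapture(0)  # webcam index; a video file path works too

# Some webcam drivers report 0 FPS; fall back to a sane default (assumed 30).
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
size = (
    int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
)
writer = cv2.VideoWriter("annotated.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, size)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    writer.write(frame)  # frames must match the size the writer was opened with
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
        break

cap.release()
writer.release()
cv2.destroyAllWindows()
```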