From 7b0f34cbb1805d3772380775149cd67b3e6b8bba Mon Sep 17 00:00:00 2001 From: Vincent STRAGIER Date: Wed, 1 Mar 2023 12:44:47 +0100 Subject: [PATCH 1/5] Close #532, add docstring and some refactoring. --- deepface/commons/functions.py | 126 +++++++++++++++++++++++++++------- 1 file changed, 103 insertions(+), 23 deletions(-) diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py index e47e367..60f9d2d 100644 --- a/deepface/commons/functions.py +++ b/deepface/commons/functions.py @@ -29,7 +29,12 @@ elif tf_major_version == 2: # -------------------------------------------------- -def initialize_folder(): +def initialize_folder() -> None: + """Initialize the folder for storing weights and models. + + Raises: + OSError: if the folder cannot be created. + """ home = get_deepface_home() if not os.path.exists(home + "/.deepface"): @@ -41,7 +46,12 @@ def initialize_folder(): print("Directory ", home, "/.deepface/weights created") -def get_deepface_home(): +def get_deepface_home() -> str: + """Get the home directory for storing weights and models. + + Returns: + str: the home directory. + """ return str(os.getenv("DEEPFACE_HOME", default=str(Path.home()))) @@ -49,6 +59,14 @@ def get_deepface_home(): def loadBase64Img(uri): + """Load image from base64 string. + + Args: + uri: a base64 string. + + Returns: + numpy array: the loaded image. + """ encoded_data = uri.split(",")[1] nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8) img = cv2.imdecode(nparr, cv2.IMREAD_COLOR) @@ -56,32 +74,38 @@ def loadBase64Img(uri): def load_image(img): + """Load image from path, url, base64 or numpy array. + + Args: + img: a path, url, base64 or numpy array. + + Raises: + ValueError: if the image path does not exist. + + Returns: + numpy array: the loaded image. + """ exact_image = False - base64_img = False - url_img = False + # The image is already a numpy array if type(img).__module__ == np.__name__: - exact_image = True + # exact_image = True + return img + # The image is a base64 string elif img.startswith("data:image/"): - base64_img = True + return loadBase64Img(img) + # The image is a url elif img.startswith("http"): - url_img = True + return np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))[:, :, ::-1] - # --------------------------- - - if base64_img is True: - img = loadBase64Img(img) - - elif url_img is True: - img = np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB")) - - elif exact_image is not True: # image path passed as input + # The image is a path + if exact_image is not True: # image path passed as input if os.path.isfile(img) is not True: raise ValueError(f"Confirm that {img} exists") - img = cv2.imread(img) + return cv2.imread(img) return img @@ -96,7 +120,23 @@ def extract_faces( grayscale=False, enforce_detection=True, align=True, -): +) -> list: + """Extract faces from an image. + + Args: + img: a path, url, base64 or numpy array. + target_size (tuple, optional): the target size of the extracted faces. Defaults to (224, 224). + detector_backend (str, optional): the face detector backend. Defaults to "opencv". + grayscale (bool, optional): whether to convert the extracted faces to grayscale. Defaults to False. + enforce_detection (bool, optional): whether to enforce face detection. Defaults to True. + align (bool, optional): whether to align the extracted faces. Defaults to True. + + Raises: + ValueError: if face could not be detected and enforce_detection is True. 
+ + Returns: + list: a list of extracted faces. + """ # this is going to store a list of img itself (numpy), it region and confidence extracted_faces = [] @@ -109,7 +149,8 @@ def extract_faces( face_objs = [(img, img_region, 0)] else: face_detector = FaceDetector.build_model(detector_backend) - face_objs = FaceDetector.detect_faces(face_detector, detector_backend, img, align) + face_objs = FaceDetector.detect_faces( + face_detector, detector_backend, img, align) # in case of no face found if len(face_objs) == 0 and enforce_detection is True: @@ -133,7 +174,8 @@ def extract_faces( factor_1 = target_size[1] / current_img.shape[1] factor = min(factor_0, factor_1) - dsize = (int(current_img.shape[1] * factor), int(current_img.shape[0] * factor)) + dsize = ( + int(current_img.shape[1] * factor), int(current_img.shape[0] * factor)) current_img = cv2.resize(current_img, dsize) diff_0 = target_size[0] - current_img.shape[0] @@ -152,7 +194,8 @@ def extract_faces( else: current_img = np.pad( current_img, - ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), + ((diff_0 // 2, diff_0 - diff_0 // 2), + (diff_1 // 2, diff_1 - diff_1 // 2)), "constant", ) @@ -161,7 +204,8 @@ def extract_faces( current_img = cv2.resize(current_img, target_size) # normalizing the image pixels - img_pixels = image.img_to_array(current_img) # what this line doing? must? + # what this line doing? must? + img_pixels = image.img_to_array(current_img) img_pixels = np.expand_dims(img_pixels, axis=0) img_pixels /= 255 # normalize input in [0, 1] @@ -185,6 +229,15 @@ def extract_faces( def normalize_input(img, normalization="base"): + """Normalize input image. + + Args: + img (numpy array): the input image. + normalization (str, optional): the normalization technique. Defaults to "base", for no normalization. + + Returns: + numpy array: the normalized image. + """ # issue 131 declares that some normalization techniques improves the accuracy @@ -232,7 +285,15 @@ def normalize_input(img, normalization="base"): return img -def find_target_size(model_name): +def find_target_size(model_name: str) -> tuple: + """Find the target size of the model. + + Args: + model_name (str): the model name. + + Returns: + tuple: the target size. + """ target_sizes = { "VGG-Face": (224, 224), @@ -267,6 +328,25 @@ def preprocess_face( enforce_detection=True, align=True, ): + """Preprocess face. + + Args: + img (numpy array): the input image. + target_size (tuple, optional): the target size. Defaults to (224, 224). + detector_backend (str, optional): the detector backend. Defaults to "opencv". + grayscale (bool, optional): whether to convert to grayscale. Defaults to False. + enforce_detection (bool, optional): whether to enforce face detection. Defaults to True. + align (bool, optional): whether to align the face. Defaults to True. + + Returns: + numpy array: the preprocessed face. + + Raises: + ValueError: if face is not detected and enforce_detection is True. + + Deprecated: + 0.0.78: Use extract_faces instead of preprocess_face. + """ print("⚠️ Function preprocess_face is deprecated. 
Use extract_faces instead.") result = None img_objs = extract_faces( From 0d176360ad3ed894bd5a626fe443a7a857966fae Mon Sep 17 00:00:00 2001 From: Vincent STRAGIER Date: Wed, 1 Mar 2023 13:05:04 +0100 Subject: [PATCH 2/5] Remove type hints (PEP484) --- deepface/commons/functions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py index 60f9d2d..47f23ef 100644 --- a/deepface/commons/functions.py +++ b/deepface/commons/functions.py @@ -29,7 +29,7 @@ elif tf_major_version == 2: # -------------------------------------------------- -def initialize_folder() -> None: +def initialize_folder(): """Initialize the folder for storing weights and models. Raises: @@ -46,7 +46,7 @@ def initialize_folder() -> None: print("Directory ", home, "/.deepface/weights created") -def get_deepface_home() -> str: +def get_deepface_home(): """Get the home directory for storing weights and models. Returns: @@ -120,7 +120,7 @@ def extract_faces( grayscale=False, enforce_detection=True, align=True, -) -> list: +): """Extract faces from an image. Args: @@ -285,7 +285,7 @@ def normalize_input(img, normalization="base"): return img -def find_target_size(model_name: str) -> tuple: +def find_target_size(model_name): """Find the target size of the model. Args: From 3a325f5540eacc3b763581c5f8f9d7e5a1380fb8 Mon Sep 17 00:00:00 2001 From: Vincent STRAGIER Date: Wed, 1 Mar 2023 13:40:46 +0100 Subject: [PATCH 3/5] Reformat with black and pylint. --- .vscode/settings.json | 28 ++++++++++------------- deepface/commons/functions.py | 43 +++++++++++++++++++---------------- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index d827213..1429cb3 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,17 +1,13 @@ { - "python.linting.pylintEnabled": true, - "python.linting.enabled": true, - "python.linting.pylintUseMinimalCheckers": false, - "editor.formatOnSave": true, - "editor.renderWhitespace": "all", - "files.autoSave": "afterDelay", - "python.analysis.typeCheckingMode": "basic", - "python.formatting.provider": "black", - "python.formatting.blackArgs": [ - "--line-length=100" - ], - "editor.fontWeight": "normal", - "python.analysis.extraPaths": [ - "./deepface" - ] -} \ No newline at end of file + "python.linting.pylintEnabled": true, + "python.linting.enabled": true, + "python.linting.pylintUseMinimalCheckers": false, + "editor.formatOnSave": true, + "editor.renderWhitespace": "all", + "files.autoSave": "afterDelay", + "python.analysis.typeCheckingMode": "basic", + "python.formatting.provider": "autopep8", + "python.formatting.blackArgs": ["--line-length=100"], + "editor.fontWeight": "normal", + "python.analysis.extraPaths": ["./deepface"] +} diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py index 47f23ef..47f46fa 100644 --- a/deepface/commons/functions.py +++ b/deepface/commons/functions.py @@ -85,29 +85,25 @@ def load_image(img): Returns: numpy array: the loaded image. 
""" - exact_image = False - # The image is already a numpy array if type(img).__module__ == np.__name__: - # exact_image = True return img # The image is a base64 string - elif img.startswith("data:image/"): + if img.startswith("data:image/"): return loadBase64Img(img) # The image is a url - elif img.startswith("http"): - return np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))[:, :, ::-1] + if img.startswith("http"): + return np.array( + Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB") + )[:, :, ::-1] # The image is a path - if exact_image is not True: # image path passed as input - if os.path.isfile(img) is not True: - raise ValueError(f"Confirm that {img} exists") + if os.path.isfile(img) is not True: + raise ValueError(f"Confirm that {img} exists") - return cv2.imread(img) - - return img + return cv2.imread(img) # -------------------------------------------------- @@ -125,9 +121,11 @@ def extract_faces( Args: img: a path, url, base64 or numpy array. - target_size (tuple, optional): the target size of the extracted faces. Defaults to (224, 224). + target_size (tuple, optional): the target size of the extracted faces. + Defaults to (224, 224). detector_backend (str, optional): the face detector backend. Defaults to "opencv". - grayscale (bool, optional): whether to convert the extracted faces to grayscale. Defaults to False. + grayscale (bool, optional): whether to convert the extracted faces to grayscale. + Defaults to False. enforce_detection (bool, optional): whether to enforce face detection. Defaults to True. align (bool, optional): whether to align the extracted faces. Defaults to True. @@ -150,7 +148,8 @@ def extract_faces( else: face_detector = FaceDetector.build_model(detector_backend) face_objs = FaceDetector.detect_faces( - face_detector, detector_backend, img, align) + face_detector, detector_backend, img, align + ) # in case of no face found if len(face_objs) == 0 and enforce_detection is True: @@ -164,7 +163,6 @@ def extract_faces( for current_img, current_region, confidence in face_objs: if current_img.shape[0] > 0 and current_img.shape[1] > 0: - if grayscale is True: current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY) @@ -175,7 +173,9 @@ def extract_faces( factor = min(factor_0, factor_1) dsize = ( - int(current_img.shape[1] * factor), int(current_img.shape[0] * factor)) + int(current_img.shape[1] * factor), + int(current_img.shape[0] * factor), + ) current_img = cv2.resize(current_img, dsize) diff_0 = target_size[0] - current_img.shape[0] @@ -194,8 +194,10 @@ def extract_faces( else: current_img = np.pad( current_img, - ((diff_0 // 2, diff_0 - diff_0 // 2), - (diff_1 // 2, diff_1 - diff_1 // 2)), + ( + (diff_0 // 2, diff_0 - diff_0 // 2), + (diff_1 // 2, diff_1 - diff_1 // 2), + ), "constant", ) @@ -233,7 +235,8 @@ def normalize_input(img, normalization="base"): Args: img (numpy array): the input image. - normalization (str, optional): the normalization technique. Defaults to "base", for no normalization. + normalization (str, optional): the normalization technique. Defaults to "base", + for no normalization. Returns: numpy array: the normalized image. 
From 34c7a81f76670b4015e7eac71228617ccfc1b9b1 Mon Sep 17 00:00:00 2001 From: Vincent STRAGIER Date: Wed, 1 Mar 2023 13:49:28 +0100 Subject: [PATCH 4/5] Restore settings.json --- .vscode/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 1429cb3..f5d9a83 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -6,7 +6,7 @@ "editor.renderWhitespace": "all", "files.autoSave": "afterDelay", "python.analysis.typeCheckingMode": "basic", - "python.formatting.provider": "autopep8", + "python.formatting.provider": "black", "python.formatting.blackArgs": ["--line-length=100"], "editor.fontWeight": "normal", "python.analysis.extraPaths": ["./deepface"] From 03d7d1931fc9ebc7d171a611143ad9566b35c88c Mon Sep 17 00:00:00 2001 From: Vincent STRAGIER Date: Wed, 1 Mar 2023 14:01:15 +0100 Subject: [PATCH 5/5] Reformat using black with settings.json --- deepface/commons/functions.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py index 47f46fa..9cc5ef6 100644 --- a/deepface/commons/functions.py +++ b/deepface/commons/functions.py @@ -95,9 +95,9 @@ def load_image(img): # The image is a url if img.startswith("http"): - return np.array( - Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB") - )[:, :, ::-1] + return np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))[ + :, :, ::-1 + ] # The image is a path if os.path.isfile(img) is not True: @@ -147,9 +147,7 @@ def extract_faces( face_objs = [(img, img_region, 0)] else: face_detector = FaceDetector.build_model(detector_backend) - face_objs = FaceDetector.detect_faces( - face_detector, detector_backend, img, align - ) + face_objs = FaceDetector.detect_faces(face_detector, detector_backend, img, align) # in case of no face found if len(face_objs) == 0 and enforce_detection is True:
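
Usage note (not part of the applied diff): a minimal sketch of how the helpers
documented and refactored in this series are expected to be called. The file
name "face.jpg" is only illustrative, and the sketch assumes deepface is
installed so that deepface.commons.functions is importable.

    from deepface.commons import functions

    # load_image accepts a file path, an http(s) URL, a base64
    # "data:image/..." URI or a numpy array, and returns the image as a
    # numpy array (BGR-ordered for path, URL and base64 inputs)
    img = functions.load_image("face.jpg")

    # extract_faces returns a list of (face_pixels, region, confidence)
    # entries; with enforce_detection=False it falls back to the whole
    # image instead of raising ValueError when no face is detected
    faces = functions.extract_faces(
        img=img,
        target_size=(224, 224),
        detector_backend="opencv",
        grayscale=False,
        enforce_detection=False,
        align=True,
    )

    for face_pixels, region, confidence in faces:
        print(face_pixels.shape, region, confidence)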
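Continuing the sketch above, the model-to-input-size lookup documented in the
first patch can be exercised on its own; "VGG-Face" is one of the keys shown
in the target_sizes table of this series.

    # returns the expected input resolution for a given model name
    assert functions.find_target_size("VGG-Face") == (224, 224)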