diff --git a/README.md b/README.md
index 9e59f40..a0cd133 100644
--- a/README.md
+++ b/README.md
@@ -313,18 +313,6 @@ $ deepface analyze -img_path tests/dataset/img1.jpg
You can also run these commands if you are running deepface with docker. Please follow the instructions in the [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh#L17).
-## Derived applications
-
-You can use deepface not just for facial recognition tasks. It's very common to use DeepFace for entertainment purposes. For instance, celebrity look-alike prediction and parental look-alike prediction tasks can be done with DeepFace!
-
-**Parental Look-Alike Prediction** - [`Vlog`](https://youtu.be/nza4tmi9vhE), [`Tutorial`](https://sefiks.com/2022/12/22/decide-whom-your-child-looks-like-with-facial-recognition-mommy-or-daddy/)
-
-

-
-**Celebrity Look-Alike Prediction** - [`Vlog`](https://youtu.be/jaxkEn-Kieo), [`Tutorial`](https://sefiks.com/2019/05/05/celebrity-look-alike-face-recognition-with-deep-learning-in-keras/)
-
-
-
## Contribution [](https://github.com/serengil/deepface/actions/workflows/tests.yml)
Pull requests are more than welcome! You should run the unit tests locally by running [`tests/unit_tests.py`](https://github.com/serengil/deepface/blob/master/tests/unit_tests.py) before creating a PR. Once a PR is sent, the GitHub test workflow will run automatically and the unit test results will be available in [GitHub actions](https://github.com/serengil/deepface/actions) before approval. In addition, the workflow will evaluate the code with pylint.
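For reference, the local pre-PR check is just the test module run from the repository root. A minimal sketch, assuming a checkout named `deepface` and a Python environment with the package's dependencies installed:

$ cd deepface
$ python tests/unit_tests.py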
diff --git a/deepface/commons/functions.py b/deepface/commons/functions.py
index 562e816..11eb2d8 100644
--- a/deepface/commons/functions.py
+++ b/deepface/commons/functions.py
@@ -36,13 +36,15 @@ def initialize_folder():
         OSError: if the folder cannot be created.
     """
     home = get_deepface_home()
+    deepface_home_path = home + "/.deepface"
+    weights_path = deepface_home_path + "/weights"
-    if not os.path.exists(home + "/.deepface"):
-        os.makedirs(home + "/.deepface")
+    if not os.path.exists(deepface_home_path):
+        os.makedirs(deepface_home_path, exist_ok=True)
         print("Directory ", home, "/.deepface created")
-    if not os.path.exists(home + "/.deepface/weights"):
-        os.makedirs(home + "/.deepface/weights")
+    if not os.path.exists(weights_path):
+        os.makedirs(weights_path, exist_ok=True)
         print("Directory ", home, "/.deepface/weights created")
diff --git a/deepface/detectors/MediapipeWrapper.py b/deepface/detectors/MediapipeWrapper.py
index 74cffff..5753485 100644
--- a/deepface/detectors/MediapipeWrapper.py
+++ b/deepface/detectors/MediapipeWrapper.py
@@ -19,35 +19,37 @@ def detect_face(face_detector, img, align=True):
     results = face_detector.process(img)
-    if results.detections:
-        for detection in results.detections:
+    # If no face has been detected, return an empty list
+    if results.detections is None:
+        return resp
-            (confidence,) = detection.score
+    # Extract the bounding box, the landmarks and the confidence score
+    for detection in results.detections:
+        (confidence,) = detection.score
-            bounding_box = detection.location_data.relative_bounding_box
-            landmarks = detection.location_data.relative_keypoints
+        bounding_box = detection.location_data.relative_bounding_box
+        landmarks = detection.location_data.relative_keypoints
-            x = int(bounding_box.xmin * img_width)
-            w = int(bounding_box.width * img_width)
-            y = int(bounding_box.ymin * img_height)
-            h = int(bounding_box.height * img_height)
+        x = int(bounding_box.xmin * img_width)
+        w = int(bounding_box.width * img_width)
+        y = int(bounding_box.ymin * img_height)
+        h = int(bounding_box.height * img_height)
-            right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
-            left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
-            # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
-            # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
-            # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
-            # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
+        # Eye landmarks: mediapipe keypoint 0 is the subject's right eye, which appears on the image's left, hence deepface's left_eye
+        left_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
+        right_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
+        # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
+        # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
+        # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
+        # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
-            if x > 0 and y > 0:
-                detected_face = img[y : y + h, x : x + w]
-                img_region = [x, y, w, h]
+        if x > 0 and y > 0:
+            detected_face = img[y : y + h, x : x + w]
+            img_region = [x, y, w, h]
-                if align:
-                    detected_face = FaceDetector.alignment_procedure(
-                        detected_face, left_eye, right_eye
-                    )
+            if align:
+                detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
-                resp.append((detected_face, img_region, confidence))
+            resp.append((detected_face, img_region, confidence))
     return resp
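The restructured `detect_face` now returns early with an empty list when mediapipe finds nothing, instead of nesting the whole loop under an `if`. A minimal usage sketch for exercising it, assuming deepface's `FaceDetector.build_model` factory from the same detectors package and the sample image shipped in the repository's tests:

```python
import cv2
from deepface.detectors import FaceDetector, MediapipeWrapper

# Build the mediapipe backend through deepface's detector factory
face_detector = FaceDetector.build_model("mediapipe")

# Sample image used elsewhere in the README
img = cv2.imread("tests/dataset/img1.jpg")

# Each entry is (detected_face, [x, y, w, h], confidence);
# an empty list means no face was detected
faces = MediapipeWrapper.detect_face(face_detector, img, align=True)
for detected_face, (x, y, w, h), confidence in faces:
    print(f"face at x={x}, y={y}, w={w}, h={h}, confidence={confidence:.2f}")
```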