Mirror of https://github.com/serengil/deepface.git

Commit 22ffc280bb: Merge remote-tracking branch 'upstream/HEAD' into analyze_raise_on_empty_actions

README.md: 12 lines changed
@@ -313,18 +313,6 @@ $ deepface analyze -img_path tests/dataset/img1.jpg
 
 You can also run these commands if you are running deepface with docker. Please follow the instructions in the [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh#L17).
 
-## Derived applications
-
-You can use deepface not just for facial recognition tasks. It's very common to use DeepFace for entertainment purposes. For instance, celebrity look-alike prediction and parental look-alike prediction tasks can be done with DeepFace!
-
-**Parental Look-Alike Prediction** - [`Vlog`](https://youtu.be/nza4tmi9vhE), [`Tutorial`](https://sefiks.com/2022/12/22/decide-whom-your-child-looks-like-with-facial-recognition-mommy-or-daddy/)
-
-<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/parental-look-alike-v2.jpg" width="90%" height="90%"></p>
-
-**Celebrity Look-Alike Prediction** - [`Vlog`](https://youtu.be/jaxkEn-Kieo), [`Tutorial`](https://sefiks.com/2019/05/05/celebrity-look-alike-face-recognition-with-deep-learning-in-keras/)
-
-<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/look-alike-v3.jpg" width="90%" height="90%"></p>
-
 ## Contribution [![Tests](https://github.com/serengil/deepface/actions/workflows/tests.yml/badge.svg)](https://github.com/serengil/deepface/actions/workflows/tests.yml)
 
 Pull requests are more than welcome! You should run the unit tests locally by running [`test/unit_tests.py`](https://github.com/serengil/deepface/blob/master/tests/unit_tests.py) before creating a PR. Once a PR is sent, the GitHub test workflow will run automatically and unit test results will be available in [GitHub actions](https://github.com/serengil/deepface/actions) before approval. Besides, the workflow will also evaluate the code with pylint.
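As an aside on the section removed above: look-alike prediction of this kind is typically a thin layer over DeepFace's verification API. A minimal sketch, assuming placeholder image paths and the default VGG-Face model:

```python
from deepface import DeepFace

# Compare a child's photo against each parent and keep the smaller distance.
# The image paths are placeholders for illustration only.
result_mom = DeepFace.verify(img1_path="child.jpg", img2_path="mom.jpg", model_name="VGG-Face")
result_dad = DeepFace.verify(img1_path="child.jpg", img2_path="dad.jpg", model_name="VGG-Face")

closer = "mom" if result_mom["distance"] < result_dad["distance"] else "dad"
print(f"child looks more like {closer}")
print("distances:", result_mom["distance"], result_dad["distance"])
```

Whichever comparison yields the smaller distance is the closer look-alike; the linked tutorials build the same idea into full applications.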
@@ -36,13 +36,15 @@ def initialize_folder():
         OSError: if the folder cannot be created.
     """
     home = get_deepface_home()
+    deepFaceHomePath = home + "/.deepface"
+    weightsPath = deepFaceHomePath + "/weights"
 
-    if not os.path.exists(home + "/.deepface"):
-        os.makedirs(home + "/.deepface")
+    if not os.path.exists(deepFaceHomePath):
+        os.makedirs(deepFaceHomePath, exist_ok=True)
         print("Directory ", home, "/.deepface created")
 
-    if not os.path.exists(home + "/.deepface/weights"):
-        os.makedirs(home + "/.deepface/weights")
+    if not os.path.exists(weightsPath):
+        os.makedirs(weightsPath, exist_ok=True)
         print("Directory ", home, "/.deepface/weights created")
 
 
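For readability, the + lines above assemble into the helper below. This is a sketch: get_deepface_home() is stubbed here with the DEEPFACE_HOME environment-variable convention, which may differ in detail from the library's actual implementation.

```python
import os
from pathlib import Path


def get_deepface_home():
    # Stand-in for deepface's own helper (assumption): fall back to the user's
    # home directory unless DEEPFACE_HOME is set.
    return str(os.getenv("DEEPFACE_HOME", default=str(Path.home())))


def initialize_folder():
    """Create the ~/.deepface and ~/.deepface/weights folders if missing.

    Raises:
        OSError: if the folder cannot be created.
    """
    home = get_deepface_home()
    deepFaceHomePath = home + "/.deepface"
    weightsPath = deepFaceHomePath + "/weights"

    if not os.path.exists(deepFaceHomePath):
        # exist_ok=True keeps a repeated or concurrent creation from raising
        os.makedirs(deepFaceHomePath, exist_ok=True)
        print("Directory ", home, "/.deepface created")

    if not os.path.exists(weightsPath):
        os.makedirs(weightsPath, exist_ok=True)
        print("Directory ", home, "/.deepface/weights created")
```

exist_ok=True makes the makedirs calls idempotent, so initializing the folder twice no longer raises FileExistsError.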
@@ -19,35 +19,37 @@ def detect_face(face_detector, img, align=True):
 
     results = face_detector.process(img)
 
-    if results.detections:
-        for detection in results.detections:
-
-            (confidence,) = detection.score
-
-            bounding_box = detection.location_data.relative_bounding_box
-            landmarks = detection.location_data.relative_keypoints
-
-            x = int(bounding_box.xmin * img_width)
-            w = int(bounding_box.width * img_width)
-            y = int(bounding_box.ymin * img_height)
-            h = int(bounding_box.height * img_height)
-
-            right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
-            left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
-            # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
-            # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
-            # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
-            # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
-
-            if x > 0 and y > 0:
-                detected_face = img[y : y + h, x : x + w]
-                img_region = [x, y, w, h]
-
-                if align:
-                    detected_face = FaceDetector.alignment_procedure(
-                        detected_face, left_eye, right_eye
-                    )
-
-                resp.append((detected_face, img_region, confidence))
+    # If no face has been detected, return an empty list
+    if results.detections is None:
+        return resp
+
+    # Extract the bounding box, the landmarks and the confidence score
+    for detection in results.detections:
+        (confidence,) = detection.score
+
+        bounding_box = detection.location_data.relative_bounding_box
+        landmarks = detection.location_data.relative_keypoints
+
+        x = int(bounding_box.xmin * img_width)
+        w = int(bounding_box.width * img_width)
+        y = int(bounding_box.ymin * img_height)
+        h = int(bounding_box.height * img_height)
+
+        # Extract landmarks
+        left_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
+        right_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
+        # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
+        # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
+        # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
+        # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
+
+        if x > 0 and y > 0:
+            detected_face = img[y : y + h, x : x + w]
+            img_region = [x, y, w, h]
+
+            if align:
+                detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
+
+            resp.append((detected_face, img_region, confidence))
 
     return resp
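The practical effect of the early return is that an image without any face yields an empty list instead of falling through the old if block; the nesting is flattened but the result is the same. A rough driver for this wrapper, assuming the module lives at deepface.detectors.MediaPipeWrapper and that mediapipe and OpenCV are installed (the sample image path is a placeholder):

```python
import cv2
import mediapipe as mp

# Assumed import path for the wrapper shown in the hunk above
from deepface.detectors import MediaPipeWrapper

# MediaPipe face detector that detect_face() expects as its first argument
face_detector = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.7)

# MediaPipe works on RGB arrays, so convert the BGR image OpenCV returns
img_bgr = cv2.imread("tests/dataset/img1.jpg")  # placeholder path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

faces = MediaPipeWrapper.detect_face(face_detector, img_rgb, align=True)
if not faces:
    print("no face detected")  # empty list thanks to the early return

for detected_face, (x, y, w, h), confidence in faces:
    print(f"face at x={x} y={y} w={w} h={h}, confidence={confidence:.2f}")
```

Note that the update also swaps which relative keypoint index is treated as the left eye and which as the right eye before alignment.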