mirror of
https://github.com/serengil/deepface.git
synced 2025-06-07 12:05:22 +00:00
simplified code
This commit is contained in:
parent
3485c348bc
commit
5171484957
@ -1,77 +1,49 @@
|
|||||||
|
|
||||||
from deepface.detectors import FaceDetector
|
from deepface.detectors import FaceDetector
|
||||||
|
|
||||||
|
|
||||||
# Link - https://google.github.io/mediapipe/solutions/face_detection
|
# Link - https://google.github.io/mediapipe/solutions/face_detection
|
||||||
|
|
||||||
def build_model():
    """Build and return a mediapipe face detector.

    Returns:
        mediapipe FaceDetection instance configured with a 0.7 minimum
        detection confidence (detections scoring below this are dropped).
    """
    # mediapipe is an optional dependency — import it lazily here, not at
    # module level, so the rest of the package works without it installed.
    import mediapipe as mp

    mp_face_detection = mp.solutions.face_detection

    # min_detection_confidence filters out low-confidence detections
    face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
    return face_detection
|
||||||
|
|
||||||
def detect_face(face_detector, img, align=True):
    """Detect faces in an image with a mediapipe face detector.

    Args:
        face_detector: mediapipe FaceDetection instance (see build_model).
        img: input image as a numpy array of shape (height, width, channels).
        align: if True, align each detected face by its eye positions via
            FaceDetector.alignment_procedure.

    Returns:
        List of (detected_face, img_region) tuples where detected_face is the
        cropped face image and img_region is [x, y, w, h] in pixel coordinates.
        Empty list when no face is found.
    """
    resp = []

    img_height = img.shape[0]
    img_width = img.shape[1]

    results = face_detector.process(img)

    # results.detections is None when mediapipe finds no face
    if results.detections:
        for detection in results.detections:
            bounding_box = detection.location_data.relative_bounding_box
            landmarks = detection.location_data.relative_keypoints

            # mediapipe returns coordinates normalized to [0.0, 1.0] by the
            # image width/height — scale them back to pixel coordinates
            x = int(bounding_box.xmin * img_width)
            w = int(bounding_box.width * img_width)
            y = int(bounding_box.ymin * img_height)
            h = int(bounding_box.height * img_height)

            # keypoints 0 and 1 are the right and left eye respectively
            # (of the 6 keypoints mediapipe reports per detection)
            right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
            left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))

            # skip detections whose box starts outside the image
            if x > 0 and y > 0:
                detected_face = img[y:y + h, x:x + w]
                img_region = [x, y, w, h]

                if align:
                    detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)

                resp.append((detected_face, img_region))

    return resp
|
||||||
|
|
||||||
|
|
||||||
#face_detector = FaceDetector.build_model('mediapipe')
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user