Merge branch 'master' into regions

commit db3a0f2fe8
Sefik Ilkin Serengil, 2022-10-24 11:24:36 +03:00 (committed by GitHub)
11 changed files with 41 additions and 25 deletions


@@ -301,7 +301,9 @@ You can also support this work on [Patreon](https://www.patreon.com/serengil?rep
 ## Citation
-Please cite deepface in your publications if it helps your research for facial recognition purposes. Here are its BibTex entries:
+Please cite deepface in your publications if it helps your research. Here are its BibTex entries:
+If you use deepface for facial recognition purposes, please cite this publication.
 ```BibTeX
 @inproceedings{serengil2020lightface,
@@ -316,7 +318,7 @@ Please cite deepface in your publications if it helps your research for facial r
 }
 ```
-If you use deepface for facial attribute analysis purposes such as age, gender, emotion or ethnicity, please cite this publication.
+If you use deepface for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction, please cite this publication.
 ```BibTeX
 @inproceedings{serengil2021lightface,


@@ -395,7 +395,7 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
 img, region = functions.preprocess_face(img = img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
-emotion_predictions = models['emotion'].predict(img)[0,:]
+emotion_predictions = models['emotion'].predict(img, verbose=0)[0,:]
 sum_of_predictions = emotion_predictions.sum()
@@ -412,7 +412,7 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 if img_224 is None:
 	img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
-age_predictions = models['age'].predict(img_224)[0,:]
+age_predictions = models['age'].predict(img_224, verbose=0)[0,:]
 apparent_age = Age.findApparentAge(age_predictions)
 resp_obj["age"] = int(apparent_age) #int cast is for the exception - object of type 'float32' is not JSON serializable
@@ -422,7 +422,7 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 if img_224 is None:
 	img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True)
-gender_predictions = models['gender'].predict(img_224)[0,:]
+gender_predictions = models['gender'].predict(img_224, verbose=0)[0,:]
 gender_labels = ["Woman", "Man"]
 resp_obj["gender"] = {}
@@ -436,7 +436,7 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , models =
 elif action == 'race':
 	if img_224 is None:
 		img_224, region = functions.preprocess_face(img = img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection, detector_backend = detector_backend, return_region = True) #just emotion model expects grayscale images
-	race_predictions = models['race'].predict(img_224)[0,:]
+	race_predictions = models['race'].predict(img_224, verbose=0)[0,:]
 race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
 sum_of_predictions = race_predictions.sum()
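
All four hunks above make the same change: recent TensorFlow versions print a progress bar on every `Model.predict` call, which floods the console when `analyze` runs repeatedly, so `verbose=0` silences it. A minimal sketch of the behavior, using a toy Keras model rather than deepface's actual attribute models:

```python
import numpy as np
import tensorflow as tf

#toy stand-in for deepface's emotion/age/gender/race models
model = tf.keras.Sequential([tf.keras.layers.Dense(7, input_shape=(48,))])
img = np.random.rand(1, 48).astype("float32")

noisy = model.predict(img)[0, :]             #prints a 1/1 progress line per call
quiet = model.predict(img, verbose=0)[0, :]  #same output, nothing on the console
```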
@@ -765,6 +765,11 @@ def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection
 #---------------------------------
 #represent
+if "keras" in str(type(model)):
+	#new tf versions show progress bar and it is annoying
+	embedding = model.predict(img, verbose=0)[0].tolist()
+else:
+	#SFace is not a keras model and it has no verbose argument
+	embedding = model.predict(img)[0].tolist()
 return embedding
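
With this guard in place, `represent` stays silent for Keras-backed models while still supporting SFace, which is not a Keras model and rejects the `verbose` keyword. A hedged usage sketch, assuming deepface is installed and `img.jpg` contains a face:

```python
from deepface import DeepFace

#Keras-backed model: predict is called with verbose=0, so no progress bar
embedding = DeepFace.represent(img_path = "img.jpg", model_name = "VGG-Face")

#SFace takes the else branch, since it has no verbose argument
embedding = DeepFace.represent(img_path = "img.jpg", model_name = "SFace")
print(len(embedding))
```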


@@ -110,7 +110,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det
 face_detector = FaceDetector.build_model(detector_backend)
 try:
-	detected_face, img_region = FaceDetector.detect_face(face_detector, detector_backend, img, align)
+	detected_face, img_region, _ = FaceDetector.detect_face(face_detector, detector_backend, img, align)
 except: #if detected face shape is (0, 0) and alignment cannot be performed, this block will be run
 	detected_face = None
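
Because every detector wrapper now returns a confidence as a third tuple element, callers that do not need the score unpack it into `_`, as above. A sketch of the new contract for a direct caller; `img.jpg` is a placeholder input:

```python
import cv2
from deepface.detectors import FaceDetector

img = cv2.imread("img.jpg")
face_detector = FaceDetector.build_model("opencv")

#the confidence element is new in this commit; discard it with _ when unused
detected_face, img_region, confidence = FaceDetector.detect_face(face_detector, "opencv", img, align = True)
```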


@@ -151,7 +151,7 @@ def analysis(db_path, model_name = 'VGG-Face', detector_backend = 'opencv', dist
 detected_faces = []
 face_index = 0
-for face, (x, y, w, h) in faces:
+for face, (x, y, w, h), _ in faces:
 	if w > 130: #discard small detected faces
 		face_detected = True


@@ -45,10 +45,13 @@ def detect_face(detector, img, align = True):
 sp = detector["sp"]
 detected_face = None
 img_region = [0, 0, img.shape[1], img.shape[0]]
 face_detector = detector["face_detector"]
-detections = face_detector(img, 1)
+#note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
+detections, scores, _ = face_detector.run(img, 1)
 if len(detections) > 0:
@@ -60,12 +63,13 @@ def detect_face(detector, img, align = True):
 detected_face = img[max(0, top): min(bottom, img.shape[0]), max(0, left): min(right, img.shape[1])]
 img_region = [left, top, right - left, bottom - top]
+confidence = scores[idx]
 if align:
 	img_shape = sp(img, detections[idx])
 	detected_face = dlib.get_face_chip(img, img_shape, size = detected_face.shape[0])
-resp.append((detected_face, img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
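
The wrapper switches from calling the fhog detector directly, which yields only rectangles, to its `run` method, which also exposes the scores mentioned in the new comment. A minimal sketch of dlib's API outside deepface, assuming dlib is installed and `img.jpg` exists:

```python
import dlib

img = dlib.load_rgb_image("img.jpg")
detector = dlib.get_frontal_face_detector()  #the same fhog detector

#run returns rectangles, their scores and sub-detector indices;
#the scores are unbounded, not probabilities
detections, scores, _ = detector.run(img, 1)  #1 = upsample the image once
for rect, score in zip(detections, scores):
	print(rect.left(), rect.top(), rect.right(), rect.bottom(), score)
```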


@@ -37,12 +37,13 @@ def detect_face(face_detector, detector_backend, img, align = True):
 obj = detect_faces(face_detector, detector_backend, img, align)
 if len(obj) > 0:
-	face, region = obj[0] #discard multiple faces
+	face, region, confidence = obj[0] #discard multiple faces
 else: #len(obj) == 0
 	face = None
 	region = [0, 0, img.shape[1], img.shape[0]]
+	confidence = 0 #no face was detected, so there is no score to report
-return face, region
+return face, region, confidence
 def detect_faces(face_detector, detector_backend, img, align = True):
@@ -59,7 +59,7 @@ def detect_faces(face_detector, detector_backend, img, align = True):
 if detect_face:
 	obj = detect_face(face_detector, img, align)
-	#obj stores list of detected_face and region pair
+	#obj stores list of (detected_face, region, confidence)
 	return obj
 else:
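
Since each wrapper now appends one confidence per face, a caller of `detect_faces` can rank or filter weak detections before further processing. A hedged sketch; the backend choice and the 0.9 cutoff are illustrative only:

```python
import cv2
from deepface.detectors import FaceDetector

img = cv2.imread("img.jpg")
face_detector = FaceDetector.build_model("mtcnn")
faces = FaceDetector.detect_faces(face_detector, "mtcnn", img, align = True)

#keep only confident detections
confident = [(face, region) for face, region, confidence in faces if confidence > 0.9]
```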


@@ -20,7 +20,7 @@ def detect_face(face_detector, img, align = True):
 if results.detections:
 	for detection in results.detections:
-		confidence = detection.score
+		confidence, = detection.score
 		bounding_box = detection.location_data.relative_bounding_box
 		landmarks = detection.location_data.relative_keypoints
@@ -44,6 +44,6 @@ def detect_face(face_detector, img, align = True):
 if align:
 	detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
-resp.append((detected_face,img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
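
The trailing comma matters here: mediapipe reports `detection.score` as a one-element repeated field, and `confidence, = detection.score` unpacks that single float. A sketch of the underlying API, assuming mediapipe is installed and the input image is RGB:

```python
import cv2
import mediapipe as mp

img = cv2.cvtColor(cv2.imread("img.jpg"), cv2.COLOR_BGR2RGB)
with mp.solutions.face_detection.FaceDetection(min_detection_confidence = 0.5) as face_detection:
	results = face_detection.process(img)
	if results.detections:
		for detection in results.detections:
			confidence, = detection.score  #unpack the single-element list
			print(round(confidence, 3))
```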


@@ -22,6 +22,7 @@ def detect_face(face_detector, img, align = True):
 x, y, w, h = detection["box"]
 detected_face = img[int(y):int(y+h), int(x):int(x+w)]
 img_region = [x, y, w, h]
+confidence = detection["confidence"]
 if align:
 	keypoints = detection["keypoints"]
@@ -29,6 +30,6 @@ def detect_face(face_detector, img, align = True):
 	right_eye = keypoints["right_eye"]
 	detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
-resp.append((detected_face, img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
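
MTCNN already reports a score per face: each element returned by `detect_faces` is a dict with `"box"`, `"confidence"` and `"keypoints"`, so the wrapper only has to forward it. A sketch of the raw package, assuming mtcnn is installed:

```python
import cv2
from mtcnn import MTCNN

img = cv2.cvtColor(cv2.imread("img.jpg"), cv2.COLOR_BGR2RGB)
detector = MTCNN()

for detection in detector.detect_faces(img):
	x, y, w, h = detection["box"]
	print(detection["confidence"], (x, y, w, h))
```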


@@ -45,13 +45,15 @@ def detect_face(detector, img, align = True):
 faces = []
 try:
 	#faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
-	faces = detector["face_detector"].detectMultiScale(img, 1.1, 10)
+	#note that, by design, opencv's haarcascade scores are >0 but not capped at 1
+	faces, _, scores = detector["face_detector"].detectMultiScale3(img, 1.1, 10, outputRejectLevels = True)
 except:
 	pass
 if len(faces) > 0:
-	for x,y,w,h in faces:
+	for (x,y,w,h), confidence in zip(faces, scores):
 		detected_face = img[int(y):int(y+h), int(x):int(x+w)]
 		if align:
@@ -59,7 +61,7 @@ def detect_face(detector, img, align = True):
 img_region = [x, y, w, h]
-resp.append((detected_face, img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
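
`detectMultiScale3` is the variant of OpenCV's cascade API that, with `outputRejectLevels = True`, returns boxes, reject levels and level weights; the weights act as the scores here, and as the new comment notes they are unbounded rather than probabilities. A standalone sketch:

```python
import cv2

cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
gray = cv2.cvtColor(cv2.imread("img.jpg"), cv2.COLOR_BGR2GRAY)

faces, _, scores = cascade.detectMultiScale3(gray, 1.1, 10, outputRejectLevels = True)
for (x, y, w, h), confidence in zip(faces, scores):
	print((x, y, w, h), float(confidence))
```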


@@ -44,6 +44,7 @@ def detect_face(face_detector, img, align = True):
 x = facial_area[0]
 w = facial_area[2] - x
 img_region = [x, y, w, h]
+confidence = identity["score"]
 #detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
 detected_face = img[facial_area[1]: facial_area[3], facial_area[0]: facial_area[2]]
@@ -58,6 +59,6 @@ def detect_face(face_detector, img, align = True):
 detected_face = postprocess.alignment_procedure(detected_face, right_eye, left_eye, nose)
-resp.append((detected_face, img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
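
The retina-face package returns one identity per detected face, each carrying a `"score"` next to its `"facial_area"` and `"landmarks"`, which is what the wrapper forwards. A sketch, assuming the retinaface package is installed:

```python
from retinaface import RetinaFace

obj = RetinaFace.detect_faces("img.jpg")
for key, identity in obj.items():  #keys look like "face_1", "face_2", ...
	print(key, identity["score"], identity["facial_area"])
```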


@@ -93,10 +93,11 @@ def detect_face(detector, img, align = True):
 detected_face = base_img[int(top*aspect_ratio_y):int(bottom*aspect_ratio_y), int(left*aspect_ratio_x):int(right*aspect_ratio_x)]
 img_region = [int(left*aspect_ratio_x), int(top*aspect_ratio_y), int(right*aspect_ratio_x) - int(left*aspect_ratio_x), int(bottom*aspect_ratio_y) - int(top*aspect_ratio_y)]
+confidence = instance["confidence"]
 if align:
 	detected_face = OpenCvWrapper.align_face(detector["eye_detector"], detected_face)
-resp.append((detected_face, img_region))
+resp.append((detected_face, img_region, confidence))
 return resp
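
The SSD backend's `instance["confidence"]` ultimately comes from the detector's raw output. A hedged sketch of that output format with OpenCV's DNN module, assuming the standard res10 Caffe model files are available locally; the 0.5 cutoff is illustrative:

```python
import cv2

net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "res10_300x300_ssd_iter_140000.caffemodel")
img = cv2.imread("img.jpg")
net.setInput(cv2.dnn.blobFromImage(cv2.resize(img, (300, 300))))

#output shape is (1, 1, N, 7): column 2 is the confidence,
#columns 3:7 are the box corners in relative coordinates
detections = net.forward()
for row in detections[0, 0]:
	if row[2] > 0.5:
		print(float(row[2]), row[3:7])
```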