clean code refactoring

Sefik Ilkin Serengil 2023-01-29 21:39:54 +00:00
parent 029f3dfa43
commit 2d39ade138
5 changed files with 255 additions and 262 deletions

View File: deepface/DeepFace.py

@@ -26,9 +26,6 @@ from deepface.basemodels import (
SFace,
)
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.extendedmodels.Emotion import EMOTION_LABELS
from deepface.extendedmodels.Gender import GENDER_LABELS
from deepface.extendedmodels.Race import RACE_LABELS
from deepface.commons import functions, realtime, distance as dst
# -----------------------------------
@@ -342,11 +339,11 @@ def analyze(
obj["emotion"] = {}
for i, emotion_label in enumerate(EMOTION_LABELS):
for i, emotion_label in enumerate(Emotion.labels):
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
obj["emotion"][emotion_label] = emotion_prediction
obj["dominant_emotion"] = EMOTION_LABELS[np.argmax(emotion_predictions)]
obj["dominant_emotion"] = Emotion.labels[np.argmax(emotion_predictions)]
elif action == "age":
age_predictions = models["age"].predict(img_content, verbose=0)[0, :]
@@ -357,22 +354,22 @@ def analyze(
elif action == "gender":
gender_predictions = models["gender"].predict(img_content, verbose=0)[0, :]
obj["gender"] = {}
for i, gender_label in enumerate(GENDER_LABELS):
for i, gender_label in enumerate(Gender.labels):
gender_prediction = 100 * gender_predictions[i]
obj["gender"][gender_label] = gender_prediction
obj["dominant_gender"] = GENDER_LABELS[np.argmax(gender_predictions)]
obj["dominant_gender"] = Gender.labels[np.argmax(gender_predictions)]
elif action == "race":
race_predictions = models["race"].predict(img_content, verbose=0)[0, :]
sum_of_predictions = race_predictions.sum()
obj["race"] = {}
for i, race_label in enumerate(RACE_LABELS):
for i, race_label in enumerate(Race.labels):
race_prediction = 100 * race_predictions[i] / sum_of_predictions
obj["race"][race_label] = race_prediction
obj["dominant_race"] = RACE_LABELS[np.argmax(race_predictions)]
obj["dominant_race"] = Race.labels[np.argmax(race_predictions)]
# -----------------------------
# mention facial areas
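Taken together, these hunks drop the re-exported EMOTION_LABELS, GENDER_LABELS and RACE_LABELS constants in favor of the labels list defined on each extended-model module. A minimal sketch of the resulting pattern for the emotion action, assuming the modules expose labels as added further below; the prediction vector is made up for illustration:

```python
import numpy as np
from deepface.extendedmodels import Emotion

# made-up scores, one per entry in Emotion.labels
emotion_predictions = np.array([0.05, 0.01, 0.04, 0.70, 0.05, 0.05, 0.10])
sum_of_predictions = emotion_predictions.sum()

obj = {"emotion": {}}
for i, emotion_label in enumerate(Emotion.labels):
    obj["emotion"][emotion_label] = 100 * emotion_predictions[i] / sum_of_predictions
obj["dominant_emotion"] = Emotion.labels[np.argmax(emotion_predictions)]  # "happy"
```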

View File: deepface/commons/realtime.py

@@ -4,7 +4,7 @@ import numpy as np
import pandas as pd
import cv2
from deepface import DeepFace
from deepface.commons import functions, distance as dst
from deepface.commons import functions
# dependency configuration
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
@@ -30,7 +30,6 @@ def analysis(
enable_age_gender = True
# ------------------------
# find custom values for this input set
threshold = dst.findThreshold(model_name, distance_metric)
target_size = functions.find_target_size(model_name=model_name)
# ------------------------
# build models once to store them in the memory
@@ -423,260 +422,259 @@ def analysis(
if df.shape[0] > 0:
candidate = df.iloc[0]
label = candidate["identity"]
best_distance = candidate[f"{model_name}_{distance_metric}"]
if best_distance <= threshold:
# to use this source image as is
display_img = cv2.imread(label)
# to use extracted face
source_objs = DeepFace.extract_faces(
img_path=label,
target_size=(pivot_img_size, pivot_img_size),
detector_backend=detector_backend,
enforce_detection=False,
align=False,
)
if len(source_objs) > 0:
# extract 1st item directly
source_obj = source_objs[0]
display_img = source_obj["face"]
display_img *= 255
display_img = display_img[:, :, ::-1]
# --------------------
label = label.split("/")[-1]
# to use this source image as is
display_img = cv2.imread(label)
# to use extracted face
source_objs = DeepFace.extract_faces(
img_path=label,
target_size=(pivot_img_size, pivot_img_size),
detector_backend=detector_backend,
enforce_detection=False,
align=False,
)
try:
if (
y - pivot_img_size > 0
and x + w + pivot_img_size < resolution_x
):
# top right
freeze_img[
y - pivot_img_size : y,
x + w : x + w + pivot_img_size,
] = display_img
if len(source_objs) > 0:
# extract 1st item directly
source_obj = source_objs[0]
display_img = source_obj["face"]
display_img *= 255
display_img = display_img[:, :, ::-1]
# --------------------
label = label.split("/")[-1]
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x + w, y),
(x + w + pivot_img_size, y + 20),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
try:
if (
y - pivot_img_size > 0
and x + w + pivot_img_size < resolution_x
):
# top right
freeze_img[
y - pivot_img_size : y,
x + w : x + w + pivot_img_size,
] = display_img
cv2.putText(
freeze_img,
label,
(x + w, y + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x + w, y),
(x + w + pivot_img_size, y + 20),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y),
(x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
(x + w, y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
cv2.putText(
freeze_img,
label,
(x + w, y + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
elif (
y + h + pivot_img_size < resolution_y
and x - pivot_img_size > 0
):
# bottom left
freeze_img[
y + h : y + h + pivot_img_size,
x - pivot_img_size : x,
] = display_img
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y),
(x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
(x + w, y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x - pivot_img_size, y + h - 20),
(x, y + h),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
elif (
y + h + pivot_img_size < resolution_y
and x - pivot_img_size > 0
):
# bottom left
freeze_img[
y + h : y + h + pivot_img_size,
x - pivot_img_size : x,
] = display_img
cv2.putText(
freeze_img,
label,
(x - pivot_img_size, y + h - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x - pivot_img_size, y + h - 20),
(x, y + h),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y + h),
(
x + int(w / 2) - int(w / 4),
y + h + int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) - int(w / 4),
y + h + int(pivot_img_size / 2),
),
(x, y + h + int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
cv2.putText(
freeze_img,
label,
(x - pivot_img_size, y + h - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
# top left
freeze_img[
y - pivot_img_size : y, x - pivot_img_size : x
] = display_img
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y + h),
(
x + int(w / 2) - int(w / 4),
y + h + int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) - int(w / 4),
y + h + int(pivot_img_size / 2),
),
(x, y + h + int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x - pivot_img_size, y),
(x, y + 20),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
# top left
freeze_img[
y - pivot_img_size : y, x - pivot_img_size : x
] = display_img
cv2.putText(
freeze_img,
label,
(x - pivot_img_size, y + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x - pivot_img_size, y),
(x, y + 20),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y),
(
x + int(w / 2) - int(w / 4),
y - int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) - int(w / 4),
y - int(pivot_img_size / 2),
),
(x, y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
cv2.putText(
freeze_img,
label,
(x - pivot_img_size, y + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
elif (
x + w + pivot_img_size < resolution_x
and y + h + pivot_img_size < resolution_y
):
# bottom right
freeze_img[
y + h : y + h + pivot_img_size,
x + w : x + w + pivot_img_size,
] = display_img
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y),
(
x + int(w / 2) - int(w / 4),
y - int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) - int(w / 4),
y - int(pivot_img_size / 2),
),
(x, y - int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x + w, y + h - 20),
(x + w + pivot_img_size, y + h),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
elif (
x + w + pivot_img_size < resolution_x
and y + h + pivot_img_size < resolution_y
):
# bottom right
freeze_img[
y + h : y + h + pivot_img_size,
x + w : x + w + pivot_img_size,
] = display_img
cv2.putText(
freeze_img,
label,
(x + w, y + h - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
overlay = freeze_img.copy()
opacity = 0.4
cv2.rectangle(
freeze_img,
(x + w, y + h - 20),
(x + w + pivot_img_size, y + h),
(46, 200, 255),
cv2.FILLED,
)
cv2.addWeighted(
overlay,
opacity,
freeze_img,
1 - opacity,
0,
freeze_img,
)
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y + h),
(
x + int(w / 2) + int(w / 4),
y + h + int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) + int(w / 4),
y + h + int(pivot_img_size / 2),
),
(x + w, y + h + int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
except Exception as err: # pylint: disable=broad-except
print(str(err))
cv2.putText(
freeze_img,
label,
(x + w, y + h - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
text_color,
1,
)
# connect face and text
cv2.line(
freeze_img,
(x + int(w / 2), y + h),
(
x + int(w / 2) + int(w / 4),
y + h + int(pivot_img_size / 2),
),
(67, 67, 67),
1,
)
cv2.line(
freeze_img,
(
x + int(w / 2) + int(w / 4),
y + h + int(pivot_img_size / 2),
),
(x + w, y + h + int(pivot_img_size / 2)),
(67, 67, 67),
1,
)
except Exception as err: # pylint: disable=broad-except
print(str(err))
tic = time.time()  # this way, the frozen image stays on screen for 5 seconds
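Most of the churn in this long hunk is re-indentation: with the threshold check removed, the overlay-drawing block moves out one level while its logic stays the same. The four branches decide where the matched thumbnail fits around the face box (x, y, w, h) inside a resolution_x by resolution_y frame; a condensed sketch using a hypothetical pick_region helper that is not part of the diff:

```python
def pick_region(x, y, w, h, pivot_img_size, resolution_x, resolution_y):
    """Return (rows, cols) slices for pasting the thumbnail, or None if no side fits."""
    if y - pivot_img_size > 0 and x + w + pivot_img_size < resolution_x:
        # top right of the face box
        return slice(y - pivot_img_size, y), slice(x + w, x + w + pivot_img_size)
    if y + h + pivot_img_size < resolution_y and x - pivot_img_size > 0:
        # bottom left
        return slice(y + h, y + h + pivot_img_size), slice(x - pivot_img_size, x)
    if y - pivot_img_size > 0 and x - pivot_img_size > 0:
        # top left
        return slice(y - pivot_img_size, y), slice(x - pivot_img_size, x)
    if x + w + pivot_img_size < resolution_x and y + h + pivot_img_size < resolution_y:
        # bottom right
        return slice(y + h, y + h + pivot_img_size), slice(x + w, x + w + pivot_img_size)
    return None

# usage: region = pick_region(...); if region: freeze_img[region] = display_img
```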

View File: deepface/extendedmodels/Emotion.py

@@ -24,6 +24,9 @@ elif tf_version == 2:
)
# -------------------------------------------
# Labels for the emotions that can be detected by the model.
labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
@@ -70,7 +73,3 @@ def loadModel(
model.load_weights(home + "/.deepface/weights/facial_expression_model_weights.h5")
return model
EMOTION_LABELS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
"""Labels for the emotions that can be detected by the model."""

View File: deepface/extendedmodels/Gender.py

@@ -18,6 +18,11 @@ elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
# -------------------------------------
# Labels for the genders that can be detected by the model.
labels = ["Woman", "Man"]
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
):
@@ -51,9 +56,3 @@ def loadModel(
gender_model.load_weights(home + "/.deepface/weights/gender_model_weights.h5")
return gender_model
# --------------------------
GENDER_LABELS = ["Woman", "Man"]
"""Labels for the genders that can be detected by the model."""

View File: deepface/extendedmodels/Race.py

@@ -17,6 +17,10 @@ elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
# --------------------------
# Labels for the ethnic phenotypes that can be detected by the model.
labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispanic"]
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
):
@@ -50,7 +54,3 @@ def loadModel(
race_model.load_weights(home + "/.deepface/weights/race_model_single_batch.h5")
return race_model
RACE_LABELS = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
"""Labels for the ethnic phenotypes that can be detected by the model."""