mirror of https://github.com/serengil/deepface.git
clean code refactoring

commit 2d39ade138
parent 029f3dfa43
deepface/DeepFace.py

@@ -26,9 +26,6 @@ from deepface.basemodels import (
     SFace,
 )
 from deepface.extendedmodels import Age, Gender, Race, Emotion
-from deepface.extendedmodels.Emotion import EMOTION_LABELS
-from deepface.extendedmodels.Gender import GENDER_LABELS
-from deepface.extendedmodels.Race import RACE_LABELS
 from deepface.commons import functions, realtime, distance as dst

 # -----------------------------------
@@ -342,11 +339,11 @@ def analyze(

                    obj["emotion"] = {}

-                    for i, emotion_label in enumerate(EMOTION_LABELS):
+                    for i, emotion_label in enumerate(Emotion.labels):
                        emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
                        obj["emotion"][emotion_label] = emotion_prediction

-                    obj["dominant_emotion"] = EMOTION_LABELS[np.argmax(emotion_predictions)]
+                    obj["dominant_emotion"] = Emotion.labels[np.argmax(emotion_predictions)]

                elif action == "age":
                    age_predictions = models["age"].predict(img_content, verbose=0)[0, :]
@@ -357,22 +354,22 @@ def analyze(
                elif action == "gender":
                    gender_predictions = models["gender"].predict(img_content, verbose=0)[0, :]
                    obj["gender"] = {}
-                    for i, gender_label in enumerate(GENDER_LABELS):
+                    for i, gender_label in enumerate(Gender.labels):
                        gender_prediction = 100 * gender_predictions[i]
                        obj["gender"][gender_label] = gender_prediction

-                    obj["dominant_gender"] = GENDER_LABELS[np.argmax(gender_predictions)]
+                    obj["dominant_gender"] = Gender.labels[np.argmax(gender_predictions)]

                elif action == "race":
                    race_predictions = models["race"].predict(img_content, verbose=0)[0, :]
                    sum_of_predictions = race_predictions.sum()

                    obj["race"] = {}
-                    for i, race_label in enumerate(RACE_LABELS):
+                    for i, race_label in enumerate(Race.labels):
                        race_prediction = 100 * race_predictions[i] / sum_of_predictions
                        obj["race"][race_label] = race_prediction

-                    obj["dominant_race"] = RACE_LABELS[np.argmax(race_predictions)]
+                    obj["dominant_race"] = Race.labels[np.argmax(race_predictions)]

                # -----------------------------
                # mention facial areas
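Net effect of the hunks above: analyze() now reads each action's label list from the model modules it already imports (Emotion.labels, Gender.labels, Race.labels) instead of the dedicated *_LABELS constants. A minimal sketch of the new access pattern, not part of the commit (assumes deepface is installed; the prediction values are made up):

    import numpy as np
    from deepface.extendedmodels import Emotion

    # hypothetical softmax output, ordered like Emotion.labels
    emotion_predictions = np.array([0.05, 0.02, 0.03, 0.70, 0.10, 0.05, 0.05])

    scores = {
        label: 100 * emotion_predictions[i] / emotion_predictions.sum()
        for i, label in enumerate(Emotion.labels)
    }
    dominant = Emotion.labels[np.argmax(emotion_predictions)]
    print(dominant, scores[dominant])  # happy 70.0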
deepface/commons/realtime.py

@@ -4,7 +4,7 @@ import numpy as np
 import pandas as pd
 import cv2
 from deepface import DeepFace
-from deepface.commons import functions, distance as dst
+from deepface.commons import functions

 # dependency configuration
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
@@ -30,7 +30,6 @@ def analysis(
     enable_age_gender = True
     # ------------------------
     # find custom values for this input set
-    threshold = dst.findThreshold(model_name, distance_metric)
     target_size = functions.find_target_size(model_name=model_name)
     # ------------------------
     # build models once to store them in the memory
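analysis() also stops computing a local match threshold: the distance module is no longer imported, the findThreshold call goes away, and the large hunk below removes the corresponding best_distance check, presumably because DeepFace.find already filters its result rows against the per-model threshold. For reference, a sketch of what the removed call did (old behavior, not part of the new code):

    from deepface.commons import distance as dst

    # tuned cutoff per model/metric pair
    threshold = dst.findThreshold("VGG-Face", "cosine")
    print(threshold)  # a float, e.g. 0.40 for this pair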
@@ -423,260 +422,259 @@ def analysis(
            if df.shape[0] > 0:
                candidate = df.iloc[0]
                label = candidate["identity"]
-                best_distance = candidate[f"{model_name}_{distance_metric}"]
-                if best_distance <= threshold:

                # to use this source image as is
                display_img = cv2.imread(label)
                # to use extracted face
                source_objs = DeepFace.extract_faces(
                    img_path=label,
                    target_size=(pivot_img_size, pivot_img_size),
                    detector_backend=detector_backend,
                    enforce_detection=False,
                    align=False,
                )

                if len(source_objs) > 0:
                    # extract 1st item directly
                    source_obj = source_objs[0]
                    display_img = source_obj["face"]
                    display_img *= 255
                    display_img = display_img[:, :, ::-1]
                # --------------------
                label = label.split("/")[-1]

                try:
                    if (
                        y - pivot_img_size > 0
                        and x + w + pivot_img_size < resolution_x
                    ):
                        # top right
                        freeze_img[
                            y - pivot_img_size : y,
                            x + w : x + w + pivot_img_size,
                        ] = display_img

                        overlay = freeze_img.copy()
                        opacity = 0.4
                        cv2.rectangle(
                            freeze_img,
                            (x + w, y),
                            (x + w + pivot_img_size, y + 20),
                            (46, 200, 255),
                            cv2.FILLED,
                        )
                        cv2.addWeighted(
                            overlay,
                            opacity,
                            freeze_img,
                            1 - opacity,
                            0,
                            freeze_img,
                        )

                        cv2.putText(
                            freeze_img,
                            label,
                            (x + w, y + 10),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5,
                            text_color,
                            1,
                        )

                        # connect face and text
                        cv2.line(
                            freeze_img,
                            (x + int(w / 2), y),
                            (x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
                            (67, 67, 67),
                            1,
                        )
                        cv2.line(
                            freeze_img,
                            (x + 3 * int(w / 4), y - int(pivot_img_size / 2)),
                            (x + w, y - int(pivot_img_size / 2)),
                            (67, 67, 67),
                            1,
                        )

                    elif (
                        y + h + pivot_img_size < resolution_y
                        and x - pivot_img_size > 0
                    ):
                        # bottom left
                        freeze_img[
                            y + h : y + h + pivot_img_size,
                            x - pivot_img_size : x,
                        ] = display_img

                        overlay = freeze_img.copy()
                        opacity = 0.4
                        cv2.rectangle(
                            freeze_img,
                            (x - pivot_img_size, y + h - 20),
                            (x, y + h),
                            (46, 200, 255),
                            cv2.FILLED,
                        )
                        cv2.addWeighted(
                            overlay,
                            opacity,
                            freeze_img,
                            1 - opacity,
                            0,
                            freeze_img,
                        )

                        cv2.putText(
                            freeze_img,
                            label,
                            (x - pivot_img_size, y + h - 10),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5,
                            text_color,
                            1,
                        )

                        # connect face and text
                        cv2.line(
                            freeze_img,
                            (x + int(w / 2), y + h),
                            (
                                x + int(w / 2) - int(w / 4),
                                y + h + int(pivot_img_size / 2),
                            ),
                            (67, 67, 67),
                            1,
                        )
                        cv2.line(
                            freeze_img,
                            (
                                x + int(w / 2) - int(w / 4),
                                y + h + int(pivot_img_size / 2),
                            ),
                            (x, y + h + int(pivot_img_size / 2)),
                            (67, 67, 67),
                            1,
                        )

                    elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
                        # top left
                        freeze_img[
                            y - pivot_img_size : y, x - pivot_img_size : x
                        ] = display_img

                        overlay = freeze_img.copy()
                        opacity = 0.4
                        cv2.rectangle(
                            freeze_img,
                            (x - pivot_img_size, y),
                            (x, y + 20),
                            (46, 200, 255),
                            cv2.FILLED,
                        )
                        cv2.addWeighted(
                            overlay,
                            opacity,
                            freeze_img,
                            1 - opacity,
                            0,
                            freeze_img,
                        )

                        cv2.putText(
                            freeze_img,
                            label,
                            (x - pivot_img_size, y + 10),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5,
                            text_color,
                            1,
                        )

                        # connect face and text
                        cv2.line(
                            freeze_img,
                            (x + int(w / 2), y),
                            (
                                x + int(w / 2) - int(w / 4),
                                y - int(pivot_img_size / 2),
                            ),
                            (67, 67, 67),
                            1,
                        )
                        cv2.line(
                            freeze_img,
                            (
                                x + int(w / 2) - int(w / 4),
                                y - int(pivot_img_size / 2),
                            ),
                            (x, y - int(pivot_img_size / 2)),
                            (67, 67, 67),
                            1,
                        )

                    elif (
                        x + w + pivot_img_size < resolution_x
                        and y + h + pivot_img_size < resolution_y
                    ):
                        # bottom righ
                        freeze_img[
                            y + h : y + h + pivot_img_size,
                            x + w : x + w + pivot_img_size,
                        ] = display_img

                        overlay = freeze_img.copy()
                        opacity = 0.4
                        cv2.rectangle(
                            freeze_img,
                            (x + w, y + h - 20),
                            (x + w + pivot_img_size, y + h),
                            (46, 200, 255),
                            cv2.FILLED,
                        )
                        cv2.addWeighted(
                            overlay,
                            opacity,
                            freeze_img,
                            1 - opacity,
                            0,
                            freeze_img,
                        )

                        cv2.putText(
                            freeze_img,
                            label,
                            (x + w, y + h - 10),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5,
                            text_color,
                            1,
                        )

                        # connect face and text
                        cv2.line(
                            freeze_img,
                            (x + int(w / 2), y + h),
                            (
                                x + int(w / 2) + int(w / 4),
                                y + h + int(pivot_img_size / 2),
                            ),
                            (67, 67, 67),
                            1,
                        )
                        cv2.line(
                            freeze_img,
                            (
                                x + int(w / 2) + int(w / 4),
                                y + h + int(pivot_img_size / 2),
                            ),
                            (x + w, y + h + int(pivot_img_size / 2)),
                            (67, 67, 67),
                            1,
                        )
                except Exception as err:  # pylint: disable=broad-except
                    print(str(err))

        tic = time.time()  # in this way, freezed image can show 5 seconds
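Nearly all of the hunk above is one level of de-indentation; the only semantic change is dropping the best_distance gate. Condensed, with show_match as a hypothetical stand-in for the overlay-drawing block:

    # before: an explicit distance check guarded the overlay
    best_distance = candidate[f"{model_name}_{distance_metric}"]
    if best_distance <= threshold:
        show_match(candidate)

    # after: every row returned by DeepFace.find is displayed directly,
    # trusting find to have applied the threshold already
    show_match(candidate)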
deepface/extendedmodels/Emotion.py

@@ -24,6 +24,9 @@ elif tf_version == 2:
     )
 # -------------------------------------------

+# Labels for the emotions that can be detected by the model.
+labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
+

 def loadModel(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
@@ -70,7 +73,3 @@ def loadModel(
     model.load_weights(home + "/.deepface/weights/facial_expression_model_weights.h5")

     return model
-
-
-EMOTION_LABELS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
-"""Labels for the emotions that can be detected by the model."""
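With the label list defined next to the model, Emotion.py is now self-contained. A sketch of standalone use, assuming deepface is installed, the weight file can be fetched, and "face.jpg" is a hypothetical pre-cropped face; the preprocessing mirrors what analyze() does:

    import cv2
    import numpy as np
    from deepface.extendedmodels import Emotion

    model = Emotion.loadModel()  # fetches facial_expression_model_weights.h5 on first use

    img = cv2.imread("face.jpg")
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # model expects 48x48 grayscale
    img_gray = cv2.resize(img_gray, (48, 48))

    predictions = model.predict(img_gray.reshape(1, 48, 48, 1), verbose=0)[0, :]
    for label, score in zip(Emotion.labels, 100 * predictions / predictions.sum()):
        print(f"{label}: {score:.2f}%")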
deepface/extendedmodels/Gender.py

@@ -18,6 +18,11 @@ elif tf_version == 2:
     from tensorflow.keras.models import Model, Sequential
     from tensorflow.keras.layers import Convolution2D, Flatten, Activation
 # -------------------------------------

+# Labels for the genders that can be detected by the model.
+labels = ["Woman", "Man"]
+
+

 def loadModel(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
 ):
@@ -51,9 +56,3 @@ def loadModel(
     gender_model.load_weights(home + "/.deepface/weights/gender_model_weights.h5")

     return gender_model
-
-# --------------------------
-
-
-GENDER_LABELS = ["Woman", "Man"]
-"""Labels for the genders that can be detected by the model."""
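Gender.py follows the same pattern; with only two labels, indexing by argmax stays readable. A tiny sketch (the prediction values are made up):

    import numpy as np
    from deepface.extendedmodels import Gender

    gender_predictions = np.array([12.3, 87.7])  # hypothetical model output
    print(Gender.labels[np.argmax(gender_predictions)])  # Man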
deepface/extendedmodels/Race.py

@@ -17,6 +17,10 @@ elif tf_version == 2:
     from tensorflow.keras.models import Model, Sequential
     from tensorflow.keras.layers import Convolution2D, Flatten, Activation
 # --------------------------

+# Labels for the ethnic phenotypes that can be detected by the model.
+labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispanic"]
+
+
 def loadModel(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
 ):
@@ -50,7 +54,3 @@ def loadModel(
     race_model.load_weights(home + "/.deepface/weights/race_model_single_batch.h5")

     return race_model
-
-
-RACE_LABELS = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
-"""Labels for the ethnic phenotypes that can be detected by the model."""
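Taken together, the three attribute modules now expose an identically named labels attribute, so downstream code can treat them uniformly. A sketch:

    from deepface.extendedmodels import Emotion, Gender, Race

    for module in (Emotion, Gender, Race):
        print(module.__name__, "->", module.labels)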