mirror of
https://github.com/serengil/deepface.git
synced 2025-06-06 19:45:21 +00:00
Load models to memory
2 new functions: verify_init(model_name) and analyze_init(models). These functions were created to pre-load models into memory for a shorter first analyze / verify call. Example: real-time analysis waiting for the first face to appear — without analyze_init() the first analyze takes 1.5 sec; with analyze_init() the first analyze takes 0.3 sec.
This commit is contained in:
parent
8cc09f4314
commit
93c9462161
@ -21,73 +21,117 @@ from deepface.extendedmodels import Age, Gender, Race, Emotion
|
|||||||
from deepface.commons import functions, realtime, distance as dst
|
from deepface.commons import functions, realtime, distance as dst
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_init(models = None):
	"""Pre-load the facial attribute models into memory.

	Calling this once up front moves the expensive model construction and
	weight download out of the first analyze() call (e.g. for real-time
	analysis the first frame drops from ~1.5s to ~0.3s).

	Parameters:
		models: optional list of attribute model names to initialize; any
			subset of ['emotion', 'age', 'gender', 'race']. When omitted
			(None) or empty, all four models are loaded.

	Returns:
		dict mapping each requested model name to its loaded model instance,
		so the caller can keep and reuse the warm models.
	"""

	#---------------------------------

	# NOTE: a None default avoids the shared-mutable-default pitfall;
	# an omitted or empty list means "load them all"
	if not models:
		models = ['emotion', 'age', 'gender', 'race']

	print("Models to initialize: ", models)

	#---------------------------------

	# keep references to the loaded models instead of discarding them in
	# locals, otherwise the caller cannot reuse the warm models
	loaded_models = {}

	if 'emotion' in models:
		loaded_models['emotion'] = Emotion.loadModel()

	if 'age' in models:
		loaded_models['age'] = Age.loadModel()

	if 'gender' in models:
		loaded_models['gender'] = Gender.loadModel()

	if 'race' in models:
		loaded_models['race'] = Race.loadModel()

	return loaded_models
def verify_init(model_name = 'VGG-Face'):
	"""Pre-load a face recognition model into memory for verify().

	Building the model (and downloading its weights on first use) dominates
	the latency of the first verify() call; doing it here up front makes
	that first call fast.

	Parameters:
		model_name: one of 'VGG-Face', 'OpenFace', 'Facenet', 'DeepFace'.

	Returns:
		the loaded model instance, so the caller can pass it to verify()
		via its model argument instead of discarding the warm model.

	Raises:
		ValueError: if model_name is not a recognized model.
	"""

	# dispatch table keeps the name -> loader mapping in one place
	model_loaders = {
		'VGG-Face': VGGFace.loadModel,
		'OpenFace': OpenFace.loadModel,
		'Facenet': Facenet.loadModel,
		'DeepFace': FbDeepFace.loadModel,
	}

	if model_name not in model_loaders:
		raise ValueError("Invalid model_name passed - ", model_name)

	print("Loading %s model" % model_name)
	model = model_loaders[model_name]()

	return model
def verify(img1_path, img2_path=''
|
def verify(img1_path, img2_path=''
|
||||||
, model_name ='VGG-Face', distance_metric = 'cosine', model = None):
|
, model_name ='VGG-Face', distance_metric = 'cosine', model = None):
|
||||||
|
|
||||||
tic = time.time()
|
tic = time.time()
|
||||||
|
|
||||||
if type(img1_path) == list:
|
if type(img1_path) == list:
|
||||||
bulkProcess = True
|
bulkProcess = True
|
||||||
img_list = img1_path.copy()
|
img_list = img1_path.copy()
|
||||||
else:
|
else:
|
||||||
bulkProcess = False
|
bulkProcess = False
|
||||||
img_list = [[img1_path, img2_path]]
|
img_list = [[img1_path, img2_path]]
|
||||||
|
|
||||||
#------------------------------
|
#------------------------------
|
||||||
|
|
||||||
if model == None:
|
if model == None:
|
||||||
if model_name == 'VGG-Face':
|
if model_name == 'VGG-Face':
|
||||||
print("Using VGG-Face model backend and", distance_metric,"distance.")
|
print("Using VGG-Face model backend and", distance_metric,"distance.")
|
||||||
model = VGGFace.loadModel()
|
model = VGGFace.loadModel()
|
||||||
|
|
||||||
elif model_name == 'OpenFace':
|
elif model_name == 'OpenFace':
|
||||||
print("Using OpenFace model backend", distance_metric,"distance.")
|
print("Using OpenFace model backend", distance_metric,"distance.")
|
||||||
model = OpenFace.loadModel()
|
model = OpenFace.loadModel()
|
||||||
|
|
||||||
elif model_name == 'Facenet':
|
elif model_name == 'Facenet':
|
||||||
print("Using Facenet model backend", distance_metric,"distance.")
|
print("Using Facenet model backend", distance_metric,"distance.")
|
||||||
model = Facenet.loadModel()
|
model = Facenet.loadModel()
|
||||||
|
|
||||||
elif model_name == 'DeepFace':
|
elif model_name == 'DeepFace':
|
||||||
print("Using FB DeepFace model backend", distance_metric,"distance.")
|
print("Using FB DeepFace model backend", distance_metric,"distance.")
|
||||||
model = FbDeepFace.loadModel()
|
model = FbDeepFace.loadModel()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise ValueError("Invalid model_name passed - ", model_name)
|
raise ValueError("Invalid model_name passed - ", model_name)
|
||||||
else: #model != None
|
else: #model != None
|
||||||
print("Already built model is passed")
|
print("Already built model is passed")
|
||||||
|
|
||||||
#------------------------------
|
#------------------------------
|
||||||
#face recognition models have different size of inputs
|
#face recognition models have different size of inputs
|
||||||
input_shape = model.layers[0].input_shape[1:3]
|
input_shape = model.layers[0].input_shape[1:3]
|
||||||
|
|
||||||
#------------------------------
|
#------------------------------
|
||||||
|
|
||||||
#tuned thresholds for model and metric pair
|
#tuned thresholds for model and metric pair
|
||||||
threshold = functions.findThreshold(model_name, distance_metric)
|
threshold = functions.findThreshold(model_name, distance_metric)
|
||||||
|
|
||||||
#------------------------------
|
#------------------------------
|
||||||
resp_objects = []
|
resp_objects = []
|
||||||
for instance in img_list:
|
for instance in img_list:
|
||||||
if type(instance) == list and len(instance) >= 2:
|
if type(instance) == list and len(instance) >= 2:
|
||||||
img1_path = instance[0]
|
img1_path = instance[0]
|
||||||
img2_path = instance[1]
|
img2_path = instance[1]
|
||||||
|
|
||||||
#----------------------
|
#----------------------
|
||||||
#crop and align faces
|
#crop and align faces
|
||||||
|
|
||||||
img1 = functions.detectFace(img1_path, input_shape)
|
img1 = functions.detectFace(img1_path, input_shape)
|
||||||
img2 = functions.detectFace(img2_path, input_shape)
|
img2 = functions.detectFace(img2_path, input_shape)
|
||||||
|
|
||||||
#----------------------
|
#----------------------
|
||||||
#find embeddings
|
#find embeddings
|
||||||
|
|
||||||
img1_representation = model.predict(img1)[0,:]
|
img1_representation = model.predict(img1)[0,:]
|
||||||
img2_representation = model.predict(img2)[0,:]
|
img2_representation = model.predict(img2)[0,:]
|
||||||
|
|
||||||
#----------------------
|
#----------------------
|
||||||
#find distances between embeddings
|
#find distances between embeddings
|
||||||
|
|
||||||
if distance_metric == 'cosine':
|
if distance_metric == 'cosine':
|
||||||
distance = dst.findCosineDistance(img1_representation, img2_representation)
|
distance = dst.findCosineDistance(img1_representation, img2_representation)
|
||||||
elif distance_metric == 'euclidean':
|
elif distance_metric == 'euclidean':
|
||||||
@ -96,18 +140,18 @@ def verify(img1_path, img2_path=''
|
|||||||
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
|
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
|
||||||
else:
|
else:
|
||||||
raise ValueError("Invalid distance_metric passed - ", distance_metric)
|
raise ValueError("Invalid distance_metric passed - ", distance_metric)
|
||||||
|
|
||||||
#----------------------
|
#----------------------
|
||||||
#decision
|
#decision
|
||||||
|
|
||||||
if distance <= threshold:
|
if distance <= threshold:
|
||||||
identified = "true"
|
identified = "true"
|
||||||
else:
|
else:
|
||||||
identified = "false"
|
identified = "false"
|
||||||
|
|
||||||
#----------------------
|
#----------------------
|
||||||
#response object
|
#response object
|
||||||
|
|
||||||
resp_obj = "{"
|
resp_obj = "{"
|
||||||
resp_obj += "\"verified\": "+identified
|
resp_obj += "\"verified\": "+identified
|
||||||
resp_obj += ", \"distance\": "+str(distance)
|
resp_obj += ", \"distance\": "+str(distance)
|
||||||
@ -115,30 +159,30 @@ def verify(img1_path, img2_path=''
|
|||||||
resp_obj += ", \"model\": \""+model_name+"\""
|
resp_obj += ", \"model\": \""+model_name+"\""
|
||||||
resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
|
resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
|
||||||
resp_obj += "}"
|
resp_obj += "}"
|
||||||
|
|
||||||
resp_obj = json.loads(resp_obj) #string to json
|
resp_obj = json.loads(resp_obj) #string to json
|
||||||
|
|
||||||
if bulkProcess == True:
|
if bulkProcess == True:
|
||||||
resp_objects.append(resp_obj)
|
resp_objects.append(resp_obj)
|
||||||
else:
|
else:
|
||||||
K.clear_session()
|
K.clear_session()
|
||||||
return resp_obj
|
return resp_obj
|
||||||
#----------------------
|
#----------------------
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise ValueError("Invalid arguments passed to verify function: ", instance)
|
raise ValueError("Invalid arguments passed to verify function: ", instance)
|
||||||
|
|
||||||
#-------------------------
|
#-------------------------
|
||||||
|
|
||||||
toc = time.time()
|
toc = time.time()
|
||||||
|
|
||||||
#print("identification lasts ",toc-tic," seconds")
|
#print("identification lasts ",toc-tic," seconds")
|
||||||
|
|
||||||
if bulkProcess == True:
|
if bulkProcess == True:
|
||||||
resp_obj = "{"
|
resp_obj = "{"
|
||||||
|
|
||||||
for i in range(0, len(resp_objects)):
|
for i in range(0, len(resp_objects)):
|
||||||
resp_item = json.dumps(resp_objects[i])
|
resp_item = json.dumps(resp_objects[i])
|
||||||
|
|
||||||
if i > 0:
|
if i > 0:
|
||||||
resp_obj += ", "
|
resp_obj += ", "
|
||||||
@ -157,129 +201,129 @@ def analyze(img_path, actions= []):
|
|||||||
else:
|
else:
|
||||||
img_paths = [img_path]
|
img_paths = [img_path]
|
||||||
bulkProcess = False
|
bulkProcess = False
|
||||||
|
|
||||||
#---------------------------------
|
#---------------------------------
|
||||||
|
|
||||||
#if a specific target is not passed, then find them all
|
#if a specific target is not passed, then find them all
|
||||||
if len(actions) == 0:
|
if len(actions) == 0:
|
||||||
actions= ['emotion', 'age', 'gender', 'race']
|
actions= ['emotion', 'age', 'gender', 'race']
|
||||||
|
|
||||||
print("Actions to do: ", actions)
|
print("Actions to do: ", actions)
|
||||||
|
|
||||||
#---------------------------------
|
#---------------------------------
|
||||||
|
|
||||||
if 'emotion' in actions:
|
if 'emotion' in actions:
|
||||||
emotion_model = Emotion.loadModel()
|
emotion_model = Emotion.loadModel()
|
||||||
|
|
||||||
if 'age' in actions:
|
if 'age' in actions:
|
||||||
age_model = Age.loadModel()
|
age_model = Age.loadModel()
|
||||||
|
|
||||||
if 'gender' in actions:
|
if 'gender' in actions:
|
||||||
gender_model = Gender.loadModel()
|
gender_model = Gender.loadModel()
|
||||||
|
|
||||||
if 'race' in actions:
|
if 'race' in actions:
|
||||||
race_model = Race.loadModel()
|
race_model = Race.loadModel()
|
||||||
#---------------------------------
|
#---------------------------------
|
||||||
|
|
||||||
resp_objects = []
|
resp_objects = []
|
||||||
for img_path in img_paths:
|
for img_path in img_paths:
|
||||||
|
|
||||||
resp_obj = "{"
|
resp_obj = "{"
|
||||||
|
|
||||||
#TO-DO: do this in parallel
|
#TO-DO: do this in parallel
|
||||||
|
|
||||||
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
|
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
|
||||||
|
|
||||||
action_idx = 0
|
action_idx = 0
|
||||||
#for action in actions:
|
#for action in actions:
|
||||||
for index in pbar:
|
for index in pbar:
|
||||||
action = actions[index]
|
action = actions[index]
|
||||||
pbar.set_description("Action: %s" % (action))
|
pbar.set_description("Action: %s" % (action))
|
||||||
|
|
||||||
if action_idx > 0:
|
if action_idx > 0:
|
||||||
resp_obj += ", "
|
resp_obj += ", "
|
||||||
|
|
||||||
if action == 'emotion':
|
if action == 'emotion':
|
||||||
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
|
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
|
||||||
img = functions.detectFace(img_path, (48, 48), True)
|
img = functions.detectFace(img_path, (48, 48), True)
|
||||||
|
|
||||||
emotion_predictions = emotion_model.predict(img)[0,:]
|
emotion_predictions = emotion_model.predict(img)[0,:]
|
||||||
|
|
||||||
sum_of_predictions = emotion_predictions.sum()
|
sum_of_predictions = emotion_predictions.sum()
|
||||||
|
|
||||||
emotion_obj = "\"emotion\": {"
|
emotion_obj = "\"emotion\": {"
|
||||||
for i in range(0, len(emotion_labels)):
|
for i in range(0, len(emotion_labels)):
|
||||||
emotion_label = emotion_labels[i]
|
emotion_label = emotion_labels[i]
|
||||||
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
|
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
|
||||||
|
|
||||||
if i > 0: emotion_obj += ", "
|
if i > 0: emotion_obj += ", "
|
||||||
|
|
||||||
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
|
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
|
||||||
|
|
||||||
emotion_obj += "}"
|
emotion_obj += "}"
|
||||||
|
|
||||||
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
|
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
|
||||||
|
|
||||||
resp_obj += emotion_obj
|
resp_obj += emotion_obj
|
||||||
|
|
||||||
elif action == 'age':
|
elif action == 'age':
|
||||||
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
||||||
#print("age prediction")
|
#print("age prediction")
|
||||||
age_predictions = age_model.predict(img)[0,:]
|
age_predictions = age_model.predict(img)[0,:]
|
||||||
apparent_age = Age.findApparentAge(age_predictions)
|
apparent_age = Age.findApparentAge(age_predictions)
|
||||||
|
|
||||||
resp_obj += "\"age\": %s" % (apparent_age)
|
resp_obj += "\"age\": %s" % (apparent_age)
|
||||||
|
|
||||||
elif action == 'gender':
|
elif action == 'gender':
|
||||||
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
||||||
#print("gender prediction")
|
#print("gender prediction")
|
||||||
|
|
||||||
gender_prediction = gender_model.predict(img)[0,:]
|
gender_prediction = gender_model.predict(img)[0,:]
|
||||||
|
|
||||||
if np.argmax(gender_prediction) == 0:
|
if np.argmax(gender_prediction) == 0:
|
||||||
gender = "Woman"
|
gender = "Woman"
|
||||||
elif np.argmax(gender_prediction) == 1:
|
elif np.argmax(gender_prediction) == 1:
|
||||||
gender = "Man"
|
gender = "Man"
|
||||||
|
|
||||||
resp_obj += "\"gender\": \"%s\"" % (gender)
|
resp_obj += "\"gender\": \"%s\"" % (gender)
|
||||||
|
|
||||||
elif action == 'race':
|
elif action == 'race':
|
||||||
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
|
||||||
race_predictions = race_model.predict(img)[0,:]
|
race_predictions = race_model.predict(img)[0,:]
|
||||||
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
|
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
|
||||||
|
|
||||||
sum_of_predictions = race_predictions.sum()
|
sum_of_predictions = race_predictions.sum()
|
||||||
|
|
||||||
race_obj = "\"race\": {"
|
race_obj = "\"race\": {"
|
||||||
for i in range(0, len(race_labels)):
|
for i in range(0, len(race_labels)):
|
||||||
race_label = race_labels[i]
|
race_label = race_labels[i]
|
||||||
race_prediction = 100 * race_predictions[i] / sum_of_predictions
|
race_prediction = 100 * race_predictions[i] / sum_of_predictions
|
||||||
|
|
||||||
if i > 0: race_obj += ", "
|
if i > 0: race_obj += ", "
|
||||||
|
|
||||||
race_obj += "\"%s\": %s" % (race_label, race_prediction)
|
race_obj += "\"%s\": %s" % (race_label, race_prediction)
|
||||||
|
|
||||||
race_obj += "}"
|
race_obj += "}"
|
||||||
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
|
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
|
||||||
|
|
||||||
resp_obj += race_obj
|
resp_obj += race_obj
|
||||||
|
|
||||||
action_idx = action_idx + 1
|
action_idx = action_idx + 1
|
||||||
|
|
||||||
resp_obj += "}"
|
resp_obj += "}"
|
||||||
|
|
||||||
resp_obj = json.loads(resp_obj)
|
resp_obj = json.loads(resp_obj)
|
||||||
|
|
||||||
if bulkProcess == True:
|
if bulkProcess == True:
|
||||||
resp_objects.append(resp_obj)
|
resp_objects.append(resp_obj)
|
||||||
else:
|
else:
|
||||||
return resp_obj
|
return resp_obj
|
||||||
|
|
||||||
if bulkProcess == True:
|
if bulkProcess == True:
|
||||||
resp_obj = "{"
|
resp_obj = "{"
|
||||||
|
|
||||||
for i in range(0, len(resp_objects)):
|
for i in range(0, len(resp_objects)):
|
||||||
resp_item = json.dumps(resp_objects[i])
|
resp_item = json.dumps(resp_objects[i])
|
||||||
|
|
||||||
if i > 0:
|
if i > 0:
|
||||||
resp_obj += ", "
|
resp_obj += ", "
|
||||||
|
Loading…
x
Reference in New Issue
Block a user