list operations supported

Şefik Serangil 2020-03-05 15:20:45 +03:00
parent 01aa0a7a25
commit 171c1a81d7
3 changed files with 265 additions and 157 deletions

README.md

@@ -34,6 +34,16 @@ print("Is verified: ", result["verified"])
print("Distance: ", result["distance"])
```
Each call of the verification function builds a face recognition model from scratch, and this is a costly operation. If you are going to verify multiple face pairs sequentially, pass an array of pairs to the verify function instead; the model is then built once and reused for every pair, which speeds things up.
```python
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg'],
['dataset/img5.jpg', 'dataset/img6.jpg']
]
result = DeepFace.verify(dataset)
```
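Here `result` is a list with one response object per pair, in the same order as the input. A minimal sketch of reading it (the fields match the single-pair response):

```python
for idx, instance in enumerate(result):
	#each item carries the same fields as a single verification result
	print(idx, "verified:", instance["verified"], "distance:", instance["distance"])
```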
## Face recognition models

Face recognition can be handled by different models. Currently, [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`Google Facenet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/) and [`Facebook DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/) models are supported in deepface. The default configuration verifies faces with the **VGG-Face** model. You can set the base model when verifying, as illustrated below. Accuracy and speed differ depending on the model used.
@@ -66,6 +76,8 @@ Deepface also offers facial attribute analysis including [`age`](https://sefiks.
from deepface import DeepFace
demography = DeepFace.analyze("img4.jpg") #passing nothing as 2nd argument will find everything
#demography = DeepFace.analyze("img4.jpg", ['age', 'gender', 'race', 'emotion']) #identical to the line above
demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #bulk analysis for a list of images
```
<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/test-case-1.jpg" width="20%" height="20%"></p>
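When a list of images is passed, `analyze` likewise returns one demography object per image, in input order. A minimal sketch of reading the bulk result (fields as in the single-image response):

```python
for demography in demographies:
	print(demography["age"], demography["dominant_emotion"], demography["gender"])
```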

deepface/DeepFace.py

@@ -17,23 +17,19 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
 from deepface.extendedmodels import Age, Gender, Race, Emotion
 from deepface.commons import functions, distance as dst
 
-def verify(img1_path, img2_path
+def verify(img1_path, img2_path=''
 	, model_name ='VGG-Face', distance_metric = 'cosine', plot = False):
 
 	tic = time.time()
 
-	if os.path.isfile(img1_path) != True:
-		raise ValueError("Confirm that ",img1_path," exists")
-
-	if os.path.isfile(img2_path) != True:
-		raise ValueError("Confirm that ",img2_path," exists")
-
-	#-------------------------
-
-	#tuned thresholds for model and metric pair
-	threshold = functions.findThreshold(model_name, distance_metric)
-
-	#-------------------------
+	if type(img1_path) == list:
+		bulkProcess = True
+		img_list = img1_path.copy()
+	else:
+		bulkProcess = False
+		img_list = [[img1_path, img2_path]]
+
+	#------------------------------
 
 	if model_name == 'VGG-Face':
 		print("Using VGG-Face model backend and", distance_metric,"distance.")
@@ -58,81 +54,116 @@ def verify(img1_path, img2_path
 	else:
 		raise ValueError("Invalid model_name passed - ", model_name)
 
-	#-------------------------
-
-	#crop face
-	img1 = functions.detectFace(img1_path, input_shape)
-	img2 = functions.detectFace(img2_path, input_shape)
-
-	#-------------------------
-	#find embeddings
-
-	img1_representation = model.predict(img1)[0,:]
-	img2_representation = model.predict(img2)[0,:]
-
-	#-------------------------
-	#find distances between embeddings
-
-	if distance_metric == 'cosine':
-		distance = dst.findCosineDistance(img1_representation, img2_representation)
-	elif distance_metric == 'euclidean':
-		distance = dst.findEuclideanDistance(img1_representation, img2_representation)
-	elif distance_metric == 'euclidean_l2':
-		distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
-	else:
-		raise ValueError("Invalid distance_metric passed - ", distance_metric)
-
-	#-------------------------
-	#decision
-
-	if distance <= threshold:
-		identified = "true"
-	else:
-		identified = "false"
-
-	#-------------------------
-
-	if plot:
-		label = "Verified: "+identified
-		label += "\nThreshold: "+str(round(distance, 2))
-		label += ", Max Threshold to Verify: "+str(threshold)
-		label += "\nModel: "+model_name
-		label += ", Similarity metric: "+distance_metric
-
-		fig = plt.figure()
-		fig.add_subplot(1,2, 1)
-		plt.imshow(img1[0][:, :, ::-1])
-		plt.xticks([]); plt.yticks([])
-		fig.add_subplot(1,2, 2)
-		plt.imshow(img2[0][:, :, ::-1])
-		plt.xticks([]); plt.yticks([])
-		fig.suptitle(label, fontsize=17)
-		plt.show(block=True)
+	#------------------------------
+
+	#tuned thresholds for model and metric pair
+	threshold = functions.findThreshold(model_name, distance_metric)
+
+	#------------------------------
+
+	resp_objects = []
+	for instance in img_list:
+		if type(instance) == list and len(instance) >= 2:
+			img1_path = instance[0]
+			img2_path = instance[1]
+
+			#----------------------
+
+			if os.path.isfile(img1_path) != True:
+				raise ValueError("Confirm that ",img1_path," exists")
+
+			if os.path.isfile(img2_path) != True:
+				raise ValueError("Confirm that ",img2_path," exists")
+
+			#----------------------
+			#crop and align faces
+
+			img1 = functions.detectFace(img1_path, input_shape)
+			img2 = functions.detectFace(img2_path, input_shape)
+
+			#----------------------
+			#find embeddings
+
+			img1_representation = model.predict(img1)[0,:]
+			img2_representation = model.predict(img2)[0,:]
+
+			#----------------------
+			#find distances between embeddings
+
+			if distance_metric == 'cosine':
+				distance = dst.findCosineDistance(img1_representation, img2_representation)
+			elif distance_metric == 'euclidean':
+				distance = dst.findEuclideanDistance(img1_representation, img2_representation)
+			elif distance_metric == 'euclidean_l2':
+				distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
+			else:
+				raise ValueError("Invalid distance_metric passed - ", distance_metric)
+
+			#----------------------
+			#decision
+
+			if distance <= threshold:
+				identified = "true"
+			else:
+				identified = "false"
+
+			#----------------------
+
+			if plot:
+				label = "Verified: "+identified
+				label += "\nThreshold: "+str(round(distance, 2))
+				label += ", Max Threshold to Verify: "+str(threshold)
+				label += "\nModel: "+model_name
+				label += ", Similarity metric: "+distance_metric
+
+				fig = plt.figure()
+				fig.add_subplot(1,2, 1)
+				plt.imshow(img1[0][:, :, ::-1])
+				plt.xticks([]); plt.yticks([])
+				fig.add_subplot(1,2, 2)
+				plt.imshow(img2[0][:, :, ::-1])
+				plt.xticks([]); plt.yticks([])
+				fig.suptitle(label, fontsize=17)
+				plt.show(block=True)
+
+			#----------------------
+			#response object
+
+			resp_obj = "{"
+			resp_obj += "\"verified\": "+identified
+			resp_obj += ", \"distance\": "+str(distance)
+			resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
+			resp_obj += ", \"model\": \""+model_name+"\""
+			resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
+			resp_obj += "}"
+
+			resp_obj = json.loads(resp_obj) #string to json
+
+			if bulkProcess == True:
+				resp_objects.append(resp_obj)
+			else:
+				return resp_obj
+
+			#----------------------
+
+		else:
+			raise ValueError("Invalid arguments passed to verify function: ", instance)
 
 	#-------------------------
 
 	toc = time.time()
 
-	resp_obj = "{"
-	resp_obj += "\"verified\": "+identified
-	resp_obj += ", \"distance\": "+str(distance)
-	resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
-	resp_obj += ", \"model\": \""+model_name+"\""
-	resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
-	resp_obj += "}"
-
-	resp_obj = json.loads(resp_obj) #string to json
-
 	#print("identification lasts ",toc-tic," seconds")
 
-	return resp_obj
+	if bulkProcess == True:
+		return resp_objects
 
 def analyze(img_path, actions= []):
 
-	if os.path.isfile(img_path) != True:
-		raise ValueError("Confirm that ",img_path," exists")
+	if type(img_path) == list:
+		img_paths = img_path.copy()
+		bulkProcess = True
+	else:
+		img_paths = [img_path]
+		bulkProcess = False
 
-	resp_obj = "{"
+	#---------------------------------
 
 	#if a specific target is not passed, then find them all
 	if len(actions) == 0:
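A side note on the verify hunk above: the response object is assembled as a JSON string and then parsed back with `json.loads`. A dict literal — a hypothetical alternative, not what this commit does — would produce the same parsed result without the quoting bookkeeping:

```python
#hypothetical dict-based equivalent of the string-built response object
resp_obj = {
	"verified": bool(distance <= threshold),  #json.loads turns the unquoted true/false token into the same boolean
	"distance": float(distance),
	"max_threshold_to_verify": threshold,
	"model": model_name,
	"similarity_metric": distance_metric
}
```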
@@ -140,95 +171,123 @@ def analyze(img_path, actions= []):
 	print("Actions to do: ", actions)
 
-	#TO-DO: do this in parallel
-	pbar = tqdm(range(0,len(actions)), desc='Finding actions')
-
-	action_idx = 0
-	#for action in actions:
-	for index in pbar:
-		action = actions[index]
-		pbar.set_description("Action: %s" % (action))
-
-		if action_idx > 0:
-			resp_obj += ", "
-
-		if action == 'emotion':
-			emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
-			img = functions.detectFace(img_path, (48, 48), True)
-
-			model = Emotion.loadModel()
-			emotion_predictions = model.predict(img)[0,:]
-
-			sum_of_predictions = emotion_predictions.sum()
-
-			emotion_obj = "\"emotion\": {"
-			for i in range(0, len(emotion_labels)):
-				emotion_label = emotion_labels[i]
-				emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
-
-				if i > 0: emotion_obj += ", "
-
-				emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
-
-			emotion_obj += "}"
-
-			emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
-
-			resp_obj += emotion_obj
-
-		elif action == 'age':
-			img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
-			#print("age prediction")
-			model = Age.loadModel()
-			age_predictions = model.predict(img)[0,:]
-			apparent_age = Age.findApparentAge(age_predictions)
-
-			resp_obj += "\"age\": %s" % (apparent_age)
-
-		elif action == 'gender':
-			img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
-			#print("gender prediction")
-
-			model = Gender.loadModel()
-			gender_prediction = model.predict(img)[0,:]
-
-			if np.argmax(gender_prediction) == 0:
-				gender = "Woman"
-			elif np.argmax(gender_prediction) == 1:
-				gender = "Man"
-
-			resp_obj += "\"gender\": \"%s\"" % (gender)
-
-		elif action == 'race':
-			img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
-			model = Race.loadModel()
-			race_predictions = model.predict(img)[0,:]
-			race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
-
-			sum_of_predictions = race_predictions.sum()
-
-			race_obj = "\"race\": {"
-			for i in range(0, len(race_labels)):
-				race_label = race_labels[i]
-				race_prediction = 100 * race_predictions[i] / sum_of_predictions
-
-				if i > 0: race_obj += ", "
-
-				race_obj += "\"%s\": %s" % (race_label, race_prediction)
-
-			race_obj += "}"
-			race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
-
-			resp_obj += race_obj
-
-		action_idx = action_idx + 1
-
-	resp_obj += "}"
-
-	resp_obj = json.loads(resp_obj)
-
-	return resp_obj
+	#---------------------------------
+
+	if 'emotion' in actions:
+		emotion_model = Emotion.loadModel()
+
+	if 'age' in actions:
+		age_model = Age.loadModel()
+
+	if 'gender' in actions:
+		gender_model = Gender.loadModel()
+
+	if 'race' in actions:
+		race_model = Race.loadModel()
+
+	#---------------------------------
+
+	resp_objects = []
+	for img_path in img_paths:
+
+		if type(img_path) != str:
+			raise ValueError("You should pass string data type for image paths but you passed ", type(img_path))
+
+		if os.path.isfile(img_path) != True:
+			raise ValueError("Confirm that ",img_path," exists")
+
+		resp_obj = "{"
+
+		#TO-DO: do this in parallel
+		pbar = tqdm(range(0,len(actions)), desc='Finding actions')
+
+		action_idx = 0
+		#for action in actions:
+		for index in pbar:
+			action = actions[index]
+			pbar.set_description("Action: %s" % (action))
+
+			if action_idx > 0:
+				resp_obj += ", "
+
+			if action == 'emotion':
+				emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+				img = functions.detectFace(img_path, (48, 48), True)
+
+				emotion_predictions = emotion_model.predict(img)[0,:]
+
+				sum_of_predictions = emotion_predictions.sum()
+
+				emotion_obj = "\"emotion\": {"
+				for i in range(0, len(emotion_labels)):
+					emotion_label = emotion_labels[i]
+					emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
+
+					if i > 0: emotion_obj += ", "
+
+					emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
+
+				emotion_obj += "}"
+
+				emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
+
+				resp_obj += emotion_obj
+
+			elif action == 'age':
+				img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+				#print("age prediction")
+				age_predictions = age_model.predict(img)[0,:]
+				apparent_age = Age.findApparentAge(age_predictions)
+
+				resp_obj += "\"age\": %s" % (apparent_age)
+
+			elif action == 'gender':
+				img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+				#print("gender prediction")
+
+				gender_prediction = gender_model.predict(img)[0,:]
+
+				if np.argmax(gender_prediction) == 0:
+					gender = "Woman"
+				elif np.argmax(gender_prediction) == 1:
+					gender = "Man"
+
+				resp_obj += "\"gender\": \"%s\"" % (gender)
+
+			elif action == 'race':
+				img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+				race_predictions = race_model.predict(img)[0,:]
+				race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
+
+				sum_of_predictions = race_predictions.sum()
+
+				race_obj = "\"race\": {"
+				for i in range(0, len(race_labels)):
+					race_label = race_labels[i]
+					race_prediction = 100 * race_predictions[i] / sum_of_predictions
+
+					if i > 0: race_obj += ", "
+
+					race_obj += "\"%s\": %s" % (race_label, race_prediction)
+
+				race_obj += "}"
+				race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
+
+				resp_obj += race_obj
+
+			action_idx = action_idx + 1
+
+		resp_obj += "}"
+
+		resp_obj = json.loads(resp_obj)
+
+		if bulkProcess == True:
+			resp_objects.append(resp_obj)
+		else:
+			return resp_obj
+
+	if bulkProcess == True:
+		return resp_objects
 
 def detectFace(img_path):
 	img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
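A key restructuring in the analyze hunk: each requested attribute model is now loaded once, before the loop over images, instead of once per call. A minimal standalone sketch of that load-once, predict-many pattern using the same module functions (the image list is illustrative):

```python
from deepface.extendedmodels import Emotion
from deepface.commons import functions
import numpy as np

img_paths = ["dataset/img1.jpg", "dataset/img2.jpg"]

#load the model a single time...
emotion_model = Emotion.loadModel()

#...then reuse it for every image
for img_path in img_paths:
	img = functions.detectFace(img_path, (48, 48), True)  #emotion model expects 48x48 grayscale input
	emotion_predictions = emotion_model.predict(img)[0,:]
	print(img_path, np.argmax(emotion_predictions))
```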

tests/unit_tests.py

@@ -5,6 +5,43 @@ import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------
print("Bulk tests")
print("-----------------------------------------")
print("Bulk face recognition tests")
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True]
]
resp_obj = DeepFace.verify(dataset)
print(resp_obj[0]["verified"] == True)
print(resp_obj[1]["verified"] == True)
print("-----------------------------------------")
print("Bulk facial analysis tests")
dataset = [
'dataset/img1.jpg',
'dataset/img2.jpg',
'dataset/img5.jpg',
'dataset/img6.jpg'
]
resp_obj = DeepFace.analyze(dataset)
print(resp_obj[0]["age"]," years old ", resp_obj[0]["dominant_emotion"], " ",resp_obj[0]["gender"])
print(resp_obj[1]["age"]," years old ", resp_obj[1]["dominant_emotion"], " ",resp_obj[1]["gender"])
print(resp_obj[2]["age"]," years old ", resp_obj[2]["dominant_emotion"], " ",resp_obj[2]["gender"])
print(resp_obj[3]["age"]," years old ", resp_obj[3]["dominant_emotion"], " ",resp_obj[3]["gender"])
print("-----------------------------------------")
#-----------------------------------------
print("Facial analysis test. Passing nothing as an action") print("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg" img = "dataset/img4.jpg"