list operations supported

Şefik Serangil 2020-03-05 15:20:45 +03:00
parent 01aa0a7a25
commit 171c1a81d7
3 changed files with 265 additions and 157 deletions

README.md

@@ -34,6 +34,16 @@ print("Is verified: ", result["verified"])
print("Distance: ", result["distance"]) print("Distance: ", result["distance"])
``` ```
Each call to the verify function builds a face recognition model from scratch, which is a costly operation. If you are going to verify multiple face pairs sequentially, pass an array of pairs to the verify function instead to speed things up.
```python
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg'],
['dataset/img5.jpg', 'dataset/img6.jpg']
]
result = DeepFace.verify(dataset)
```
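When a list of pairs is passed, verify returns a list with one response object per pair instead of a single result. The following is a minimal sketch of consuming the bulk output, assuming the `dataset` variable from the snippet above; the `verified` and `distance` keys match the single-pair response shown earlier.
```python
results = DeepFace.verify(dataset)
for result in results:
    print("Is verified: ", result["verified"], "- distance: ", result["distance"])
```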
## Face recognition models
Face recognition can be handled by different models. Currently, [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`Google Facenet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/) and [`Facebook DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/) models are supported in deepface. The default configuration verifies faces with the **VGG-Face** model. You can set the base model during verification as illustrated below. Accuracy and speed differ depending on the chosen model.
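For example, the base model can be switched through the `model_name` argument of verify. This is a minimal sketch assuming `'Facenet'` is an accepted key, matching the `Facenet` base model imported in this commit:
```python
from deepface import DeepFace
result = DeepFace.verify("img1.jpg", "img2.jpg", model_name = "Facenet")
```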
@@ -66,6 +76,8 @@ Deepface also offers facial attribute analysis including [`age`](https://sefiks.
from deepface import DeepFace
demography = DeepFace.analyze("img4.jpg") #passing nothing as 2nd argument will find everything
#demography = DeepFace.analyze("img4.jpg", ['age', 'gender', 'race', 'emotion']) #identical to the line above
demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"])
```
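In bulk mode, analyze similarly returns one demography object per image. A minimal sketch of reading the bulk output, assuming the `age`, `dominant_emotion` and `gender` keys that the single-image response exposes:
```python
for demography in demographies:
    print(demography["age"], "years old -", demography["dominant_emotion"], "-", demography["gender"])
```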
<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/test-case-1.jpg" width="20%" height="20%"></p> <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/test-case-1.jpg" width="20%" height="20%"></p>

deepface/DeepFace.py

@@ -17,23 +17,19 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, distance as dst
def verify(img1_path, img2_path=''
    , model_name ='VGG-Face', distance_metric = 'cosine', plot = False):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend and", distance_metric,"distance.")
@@ -57,82 +53,117 @@ def verify(img1_path, img2_path
    else:
        raise ValueError("Invalid model_name passed - ", model_name)

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------

    resp_objects = []

    for instance in img_list:

        if type(instance) == list and len(instance) >= 2:

            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------

            if os.path.isfile(img1_path) != True:
                raise ValueError("Confirm that ",img1_path," exists")

            if os.path.isfile(img2_path) != True:
                raise ValueError("Confirm that ",img2_path," exists")

            #----------------------

            #crop and align faces
            img1 = functions.detectFace(img1_path, input_shape)
            img2 = functions.detectFace(img2_path, input_shape)

            #----------------------

            #find embeddings
            img1_representation = model.predict(img1)[0,:]
            img2_representation = model.predict(img2)[0,:]

            #----------------------

            #find distances between embeddings
            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation, img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation, img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ", distance_metric)

            #----------------------

            #decision
            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            #----------------------

            if plot:
                label = "Verified: "+identified
                label += "\nThreshold: "+str(round(distance, 2))
                label += ", Max Threshold to Verify: "+str(threshold)
                label += "\nModel: "+model_name
                label += ", Similarity metric: "+distance_metric

                fig = plt.figure()
                fig.add_subplot(1,2, 1)
                plt.imshow(img1[0][:, :, ::-1])
                plt.xticks([]); plt.yticks([])
                fig.add_subplot(1,2, 2)
                plt.imshow(img2[0][:, :, ::-1])
                plt.xticks([]); plt.yticks([])
                fig.suptitle(label, fontsize=17)
                plt.show(block=True)

            #----------------------

            #response object
            resp_obj = "{"
            resp_obj += "\"verified\": "+identified
            resp_obj += ", \"distance\": "+str(distance)
            resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
            resp_obj += ", \"model\": \""+model_name+"\""
            resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj) #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                return resp_obj

            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ", instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        return resp_objects
def analyze(img_path, actions= []):

    if type(img_path) == list:
        img_paths = img_path.copy()
        bulkProcess = True
    else:
        img_paths = [img_path]
        bulkProcess = False

    #---------------------------------

    #if a specific target is not passed, then find them all
    if len(actions) == 0:
@@ -140,95 +171,123 @@ def analyze(img_path, actions= []):
print("Actions to do: ", actions) print("Actions to do: ", actions)
#TO-DO: do this in parallel #---------------------------------
pbar = tqdm(range(0,len(actions)), desc='Finding actions') if 'emotion' in actions:
emotion_model = Emotion.loadModel()
action_idx = 0 if 'age' in actions:
#for action in actions: age_model = Age.loadModel()
for index in pbar:
action = actions[index] if 'gender' in actions:
pbar.set_description("Action: %s" % (action)) gender_model = Gender.loadModel()
if 'race' in actions:
race_model = Race.loadModel()
#---------------------------------
resp_objects = []
for img_path in img_paths:
if action_idx > 0: if type(img_path) != str:
resp_obj += ", " raise ValueError("You should pass string data type for image paths but you passed ", type(img_path))
if action == 'emotion': if os.path.isfile(img_path) != True:
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'] raise ValueError("Confirm that ",img_path," exists")
img = functions.detectFace(img_path, (48, 48), True)
model = Emotion.loadModel()
emotion_predictions = model.predict(img)[0,:]
sum_of_predictions = emotion_predictions.sum()
emotion_obj = "\"emotion\": {"
for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
if i > 0: emotion_obj += ", "
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
emotion_obj += "}"
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
resp_obj += emotion_obj
elif action == 'age':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("age prediction")
model = Age.loadModel()
age_predictions = model.predict(img)[0,:]
apparent_age = Age.findApparentAge(age_predictions)
resp_obj += "\"age\": %s" % (apparent_age)
elif action == 'gender':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("gender prediction")
model = Gender.loadModel()
gender_prediction = model.predict(img)[0,:]
if np.argmax(gender_prediction) == 0:
gender = "Woman"
elif np.argmax(gender_prediction) == 1:
gender = "Man"
resp_obj += "\"gender\": \"%s\"" % (gender)
elif action == 'race':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
model = Race.loadModel()
race_predictions = model.predict(img)[0,:]
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
sum_of_predictions = race_predictions.sum()
race_obj = "\"race\": {"
for i in range(0, len(race_labels)):
race_label = race_labels[i]
race_prediction = 100 * race_predictions[i] / sum_of_predictions
if i > 0: race_obj += ", "
race_obj += "\"%s\": %s" % (race_label, race_prediction)
race_obj += "}"
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
resp_obj += race_obj
action_idx = action_idx + 1 resp_obj = "{"
#TO-DO: do this in parallel
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
action_idx = 0
#for action in actions:
for index in pbar:
action = actions[index]
pbar.set_description("Action: %s" % (action))
if action_idx > 0:
resp_obj += ", "
if action == 'emotion':
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
img = functions.detectFace(img_path, (48, 48), True)
emotion_predictions = emotion_model.predict(img)[0,:]
sum_of_predictions = emotion_predictions.sum()
emotion_obj = "\"emotion\": {"
for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
if i > 0: emotion_obj += ", "
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
emotion_obj += "}"
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
resp_obj += emotion_obj
elif action == 'age':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("age prediction")
age_predictions = age_model.predict(img)[0,:]
apparent_age = Age.findApparentAge(age_predictions)
resp_obj += "\"age\": %s" % (apparent_age)
elif action == 'gender':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("gender prediction")
gender_prediction = gender_model.predict(img)[0,:]
if np.argmax(gender_prediction) == 0:
gender = "Woman"
elif np.argmax(gender_prediction) == 1:
gender = "Man"
resp_obj += "\"gender\": \"%s\"" % (gender)
elif action == 'race':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
race_predictions = race_model.predict(img)[0,:]
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
sum_of_predictions = race_predictions.sum()
race_obj = "\"race\": {"
for i in range(0, len(race_labels)):
race_label = race_labels[i]
race_prediction = 100 * race_predictions[i] / sum_of_predictions
if i > 0: race_obj += ", "
race_obj += "\"%s\": %s" % (race_label, race_prediction)
race_obj += "}"
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
resp_obj += race_obj
action_idx = action_idx + 1
resp_obj += "}"
resp_obj = json.loads(resp_obj)
if bulkProcess == True:
resp_objects.append(resp_obj)
else:
return resp_obj
resp_obj += "}" if bulkProcess == True:
return resp_objects
resp_obj = json.loads(resp_obj)
return resp_obj
def detectFace(img_path):
    img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)

tests/unit_tests.py

@@ -5,6 +5,43 @@ import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------
print("Bulk tests")
print("-----------------------------------------")
print("Bulk face recognition tests")
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True]
]
resp_obj = DeepFace.verify(dataset)
print(resp_obj[0]["verified"] == True)
print(resp_obj[1]["verified"] == True)
print("-----------------------------------------")
print("Bulk facial analysis tests")
dataset = [
'dataset/img1.jpg',
'dataset/img2.jpg',
'dataset/img5.jpg',
'dataset/img6.jpg'
]
resp_obj = DeepFace.analyze(dataset)
print(resp_obj[0]["age"]," years old ", resp_obj[0]["dominant_emotion"], " ",resp_obj[0]["gender"])
print(resp_obj[1]["age"]," years old ", resp_obj[1]["dominant_emotion"], " ",resp_obj[1]["gender"])
print(resp_obj[2]["age"]," years old ", resp_obj[2]["dominant_emotion"], " ",resp_obj[2]["gender"])
print(resp_obj[3]["age"]," years old ", resp_obj[3]["dominant_emotion"], " ",resp_obj[3]["gender"])
print("-----------------------------------------")
#-----------------------------------------
print("Facial analysis test. Passing nothing as an action") print("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg" img = "dataset/img4.jpg"