diff --git a/README.md b/README.md
index e5aa327..3e1e81d 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,16 @@ print("Is verified: ", result["verified"])
print("Distance: ", result["distance"])
```
+Each call to the verify function builds a face recognition model from scratch, and this is a costly operation. If you are going to verify multiple face pairs, pass them as an array to the verify function; the model is then built once and reused for every pair, which speeds things up.
+
+```python
+dataset = [
+ ['dataset/img1.jpg', 'dataset/img2.jpg'],
+ ['dataset/img5.jpg', 'dataset/img6.jpg']
+]
+result = DeepFace.verify(dataset)
+```
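+
+When an array is passed, verify returns a list of result objects in the same order as the input pairs. A minimal sketch of reading them (the field names match the single-pair example above):
+
+```python
+for pair_result in result:
+	print("Is verified: ", pair_result["verified"])
+	print("Distance: ", pair_result["distance"])
+```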
+
## Face recognition models
Face recognition can be handled by different models. Currently, [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`Google Facenet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/) and [`Facebook DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/) models are supported in deepface. The default configuration verifies faces with the **VGG-Face** model. You can set the base model for verification as illustrated below. Accuracy and speed vary depending on the model used.
@@ -66,6 +76,8 @@ Deepface also offers facial attribute analysis including [`age`](https://sefiks.
from deepface import DeepFace
demography = DeepFace.analyze("img4.jpg") #passing nothing as 2nd argument will find everything
#demography = DeepFace.analyze("img4.jpg", ['age', 'gender', 'race', 'emotion']) #identical to the line above
+
+demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"])
```
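+
+When a list of images is passed, analyze returns a list of demography objects in the same order. A minimal sketch of reading them (field names as exercised in tests/unit_tests.py):
+
+```python
+for demography in demographies:
+	print(demography["age"], " years old ", demography["dominant_emotion"], " ", demography["gender"])
+```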

diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index 49d833e..6935eca 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -17,23 +17,19 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, distance as dst
-def verify(img1_path, img2_path
+def verify(img1_path, img2_path=''
, model_name ='VGG-Face', distance_metric = 'cosine', plot = False):
tic = time.time()
- if os.path.isfile(img1_path) != True:
- raise ValueError("Confirm that ",img1_path," exists")
-
- if os.path.isfile(img2_path) != True:
- raise ValueError("Confirm that ",img2_path," exists")
-
- #-------------------------
-
- #tuned thresholds for model and metric pair
- threshold = functions.findThreshold(model_name, distance_metric)
-
- #-------------------------
+ if type(img1_path) == list:
+ bulkProcess = True
+ img_list = img1_path.copy()
+ else:
+ bulkProcess = False
+ img_list = [[img1_path, img2_path]]
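+		#wrapping the single pair lets the same loop below handle both single and bulk calls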
+
+ #------------------------------
if model_name == 'VGG-Face':
print("Using VGG-Face model backend and", distance_metric,"distance.")
@@ -57,82 +53,117 @@ def verify(img1_path, img2_path
else:
raise ValueError("Invalid model_name passed - ", model_name)
-
- #-------------------------
- #crop face
- img1 = functions.detectFace(img1_path, input_shape)
- img2 = functions.detectFace(img2_path, input_shape)
+ #------------------------------
- #-------------------------
- #find embeddings
+ #tuned thresholds for model and metric pair
+ threshold = functions.findThreshold(model_name, distance_metric)
- img1_representation = model.predict(img1)[0,:]
- img2_representation = model.predict(img2)[0,:]
+ #------------------------------
+ resp_objects = []
+ for instance in img_list:
+ if type(instance) == list and len(instance) >= 2:
+ img1_path = instance[0]
+ img2_path = instance[1]
+
+ #----------------------
+
+ if os.path.isfile(img1_path) != True:
+ raise ValueError("Confirm that ",img1_path," exists")
+
+ if os.path.isfile(img2_path) != True:
+ raise ValueError("Confirm that ",img2_path," exists")
+
+ #----------------------
+ #crop and align faces
+
+ img1 = functions.detectFace(img1_path, input_shape)
+ img2 = functions.detectFace(img2_path, input_shape)
+
+ #----------------------
+ #find embeddings
- #-------------------------
- #find distances between embeddings
-
- if distance_metric == 'cosine':
- distance = dst.findCosineDistance(img1_representation, img2_representation)
- elif distance_metric == 'euclidean':
- distance = dst.findEuclideanDistance(img1_representation, img2_representation)
- elif distance_metric == 'euclidean_l2':
- distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
- else:
- raise ValueError("Invalid distance_metric passed - ", distance_metric)
-
- #-------------------------
- #decision
-
- if distance <= threshold:
- identified = "true"
- else:
- identified = "false"
-
- #-------------------------
-
- if plot:
- label = "Verified: "+identified
- label += "\nThreshold: "+str(round(distance, 2))
- label += ", Max Threshold to Verify: "+str(threshold)
- label += "\nModel: "+model_name
- label += ", Similarity metric: "+distance_metric
+ img1_representation = model.predict(img1)[0,:]
+ img2_representation = model.predict(img2)[0,:]
+
+ #----------------------
+ #find distances between embeddings
+
+ if distance_metric == 'cosine':
+ distance = dst.findCosineDistance(img1_representation, img2_representation)
+ elif distance_metric == 'euclidean':
+ distance = dst.findEuclideanDistance(img1_representation, img2_representation)
+ elif distance_metric == 'euclidean_l2':
+ distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
+ else:
+ raise ValueError("Invalid distance_metric passed - ", distance_metric)
+
+ #----------------------
+ #decision
+
+ if distance <= threshold:
+ identified = "true"
+ else:
+ identified = "false"
+ #----------------------
+ if plot:
+ label = "Verified: "+identified
+ label += "\nThreshold: "+str(round(distance, 2))
+ label += ", Max Threshold to Verify: "+str(threshold)
+ label += "\nModel: "+model_name
+ label += ", Similarity metric: "+distance_metric
+
+ fig = plt.figure()
+ fig.add_subplot(1,2, 1)
+ plt.imshow(img1[0][:, :, ::-1])
+ plt.xticks([]); plt.yticks([])
+ fig.add_subplot(1,2, 2)
+ plt.imshow(img2[0][:, :, ::-1])
+ plt.xticks([]); plt.yticks([])
+ fig.suptitle(label, fontsize=17)
+ plt.show(block=True)
+
+ #----------------------
+ #response object
+
+ resp_obj = "{"
+ resp_obj += "\"verified\": "+identified
+ resp_obj += ", \"distance\": "+str(distance)
+ resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
+ resp_obj += ", \"model\": \""+model_name+"\""
+ resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
+ resp_obj += "}"
+
+ resp_obj = json.loads(resp_obj) #string to json
+
+ if bulkProcess == True:
+ resp_objects.append(resp_obj)
+ else:
+ return resp_obj
+ #----------------------
+
+ else:
+ raise ValueError("Invalid arguments passed to verify function: ", instance)
- fig = plt.figure()
- fig.add_subplot(1,2, 1)
- plt.imshow(img1[0][:, :, ::-1])
- plt.xticks([]); plt.yticks([])
- fig.add_subplot(1,2, 2)
- plt.imshow(img2[0][:, :, ::-1])
- plt.xticks([]); plt.yticks([])
- fig.suptitle(label, fontsize=17)
- plt.show(block=True)
-
#-------------------------
toc = time.time()
- resp_obj = "{"
- resp_obj += "\"verified\": "+identified
- resp_obj += ", \"distance\": "+str(distance)
- resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
- resp_obj += ", \"model\": \""+model_name+"\""
- resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
- resp_obj += "}"
-
- resp_obj = json.loads(resp_obj) #string to json
-
#print("identification lasts ",toc-tic," seconds")
- return resp_obj
+ if bulkProcess == True:
+ return resp_objects
def analyze(img_path, actions= []):
- if os.path.isfile(img_path) != True:
- raise ValueError("Confirm that ",img_path," exists")
+ if type(img_path) == list:
+ img_paths = img_path.copy()
+ bulkProcess = True
+ else:
+ img_paths = [img_path]
+ bulkProcess = False
- resp_obj = "{"
+ #---------------------------------
#if a specific target is not passed, then find them all
if len(actions) == 0:
@@ -140,95 +171,123 @@ def analyze(img_path, actions= []):
print("Actions to do: ", actions)
- #TO-DO: do this in parallel
+ #---------------------------------
- pbar = tqdm(range(0,len(actions)), desc='Finding actions')
+ if 'emotion' in actions:
+ emotion_model = Emotion.loadModel()
- action_idx = 0
- #for action in actions:
- for index in pbar:
- action = actions[index]
- pbar.set_description("Action: %s" % (action))
+ if 'age' in actions:
+ age_model = Age.loadModel()
+
+ if 'gender' in actions:
+ gender_model = Gender.loadModel()
+
+ if 'race' in actions:
+ race_model = Race.loadModel()
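+
+	#attribute models are built once here and reused for every image in img_paths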
+ #---------------------------------
+
+ resp_objects = []
+ for img_path in img_paths:
- if action_idx > 0:
- resp_obj += ", "
+ if type(img_path) != str:
+ raise ValueError("You should pass string data type for image paths but you passed ", type(img_path))
- if action == 'emotion':
- emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
- img = functions.detectFace(img_path, (48, 48), True)
-
- model = Emotion.loadModel()
- emotion_predictions = model.predict(img)[0,:]
-
- sum_of_predictions = emotion_predictions.sum()
-
- emotion_obj = "\"emotion\": {"
- for i in range(0, len(emotion_labels)):
- emotion_label = emotion_labels[i]
- emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
-
- if i > 0: emotion_obj += ", "
-
- emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
-
- emotion_obj += "}"
-
- emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
-
- resp_obj += emotion_obj
-
- elif action == 'age':
- img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
- #print("age prediction")
- model = Age.loadModel()
- age_predictions = model.predict(img)[0,:]
- apparent_age = Age.findApparentAge(age_predictions)
-
- resp_obj += "\"age\": %s" % (apparent_age)
-
- elif action == 'gender':
- img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
- #print("gender prediction")
-
- model = Gender.loadModel()
- gender_prediction = model.predict(img)[0,:]
-
- if np.argmax(gender_prediction) == 0:
- gender = "Woman"
- elif np.argmax(gender_prediction) == 1:
- gender = "Man"
-
- resp_obj += "\"gender\": \"%s\"" % (gender)
-
- elif action == 'race':
- img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
- model = Race.loadModel()
- race_predictions = model.predict(img)[0,:]
- race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
-
- sum_of_predictions = race_predictions.sum()
-
- race_obj = "\"race\": {"
- for i in range(0, len(race_labels)):
- race_label = race_labels[i]
- race_prediction = 100 * race_predictions[i] / sum_of_predictions
-
- if i > 0: race_obj += ", "
-
- race_obj += "\"%s\": %s" % (race_label, race_prediction)
-
- race_obj += "}"
- race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
-
- resp_obj += race_obj
+ if os.path.isfile(img_path) != True:
+ raise ValueError("Confirm that ",img_path," exists")
- action_idx = action_idx + 1
+ resp_obj = "{"
+
+ #TO-DO: do this in parallel
+
+ pbar = tqdm(range(0,len(actions)), desc='Finding actions')
+
+ action_idx = 0
+ #for action in actions:
+ for index in pbar:
+ action = actions[index]
+ pbar.set_description("Action: %s" % (action))
+
+ if action_idx > 0:
+ resp_obj += ", "
+
+ if action == 'emotion':
+ emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+ img = functions.detectFace(img_path, (48, 48), True)
+
+ emotion_predictions = emotion_model.predict(img)[0,:]
+
+ sum_of_predictions = emotion_predictions.sum()
+
+ emotion_obj = "\"emotion\": {"
+ for i in range(0, len(emotion_labels)):
+ emotion_label = emotion_labels[i]
+ emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
+
+ if i > 0: emotion_obj += ", "
+
+ emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
+
+ emotion_obj += "}"
+
+ emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
+
+ resp_obj += emotion_obj
+
+ elif action == 'age':
+ img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+ #print("age prediction")
+ age_predictions = age_model.predict(img)[0,:]
+ apparent_age = Age.findApparentAge(age_predictions)
+
+ resp_obj += "\"age\": %s" % (apparent_age)
+
+ elif action == 'gender':
+ img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+ #print("gender prediction")
+
+ gender_prediction = gender_model.predict(img)[0,:]
+
+ if np.argmax(gender_prediction) == 0:
+ gender = "Woman"
+ elif np.argmax(gender_prediction) == 1:
+ gender = "Man"
+
+ resp_obj += "\"gender\": \"%s\"" % (gender)
+
+ elif action == 'race':
+ img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
+ race_predictions = race_model.predict(img)[0,:]
+ race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
+
+ sum_of_predictions = race_predictions.sum()
+
+ race_obj = "\"race\": {"
+ for i in range(0, len(race_labels)):
+ race_label = race_labels[i]
+ race_prediction = 100 * race_predictions[i] / sum_of_predictions
+
+ if i > 0: race_obj += ", "
+
+ race_obj += "\"%s\": %s" % (race_label, race_prediction)
+
+ race_obj += "}"
+ race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
+
+ resp_obj += race_obj
+
+ action_idx = action_idx + 1
+
+ resp_obj += "}"
+
+ resp_obj = json.loads(resp_obj)
+
+ if bulkProcess == True:
+ resp_objects.append(resp_obj)
+ else:
+ return resp_obj
- resp_obj += "}"
-
- resp_obj = json.loads(resp_obj)
-
- return resp_obj
+ if bulkProcess == True:
+ return resp_objects
def detectFace(img_path):
img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
diff --git a/tests/unit_tests.py b/tests/unit_tests.py
index 046ed5d..82815c7 100644
--- a/tests/unit_tests.py
+++ b/tests/unit_tests.py
@@ -5,6 +5,43 @@ import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------
+print("Bulk tests")
+
+print("-----------------------------------------")
+
+print("Bulk face recognition tests")
+
+dataset = [
+ ['dataset/img1.jpg', 'dataset/img2.jpg', True],
+ ['dataset/img5.jpg', 'dataset/img6.jpg', True]
+]
+
+resp_obj = DeepFace.verify(dataset)
+print(resp_obj[0]["verified"] == True)
+print(resp_obj[1]["verified"] == True)
+
+print("-----------------------------------------")
+
+print("Bulk facial analysis tests")
+
+dataset = [
+ 'dataset/img1.jpg',
+ 'dataset/img2.jpg',
+ 'dataset/img5.jpg',
+ 'dataset/img6.jpg'
+]
+
+resp_obj = DeepFace.analyze(dataset)
+print(resp_obj[0]["age"]," years old ", resp_obj[0]["dominant_emotion"], " ",resp_obj[0]["gender"])
+print(resp_obj[1]["age"]," years old ", resp_obj[1]["dominant_emotion"], " ",resp_obj[1]["gender"])
+print(resp_obj[2]["age"]," years old ", resp_obj[2]["dominant_emotion"], " ",resp_obj[2]["gender"])
+print(resp_obj[3]["age"]," years old ", resp_obj[3]["dominant_emotion"], " ",resp_obj[3]["gender"])
+
+
+print("-----------------------------------------")
+
+#-----------------------------------------
+
print("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg"