list operations supported

Şefik Serangil 2020-03-05 15:20:45 +03:00
parent 01aa0a7a25
commit 171c1a81d7
3 changed files with 265 additions and 157 deletions


@@ -34,6 +34,16 @@ print("Is verified: ", result["verified"])
print("Distance: ", result["distance"])
```
Each call to the verify function builds a face recognition model from scratch, and this is a costly operation. If you are going to verify multiple face pairs sequentially, you should pass an array of pairs to the verify function to speed things up.
```python
dataset = [
    ['dataset/img1.jpg', 'dataset/img2.jpg'],
    ['dataset/img5.jpg', 'dataset/img6.jpg']
]
result = DeepFace.verify(dataset)
```
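When an array is passed, verify returns a list of response objects, one per pair, instead of a single one, so the results can be read back in a loop. A minimal sketch continuing the example above (field names taken from the single-pair example earlier in this README):
```python
results = DeepFace.verify(dataset)
for result in results:
    print("Is verified: ", result["verified"])
    print("Distance: ", result["distance"])
```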
## Face recognition models
Face recognition can be handled by different models. Currently, [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`Google Facenet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/) and [`Facebook DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/) models are supported in deepface. The default configuration verifies faces with the **VGG-Face** model. You can set the base model during verification as illustrated below. Accuracy and speed differ from model to model.
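For instance, a call that switches the backend to Google Facenet might look like the sketch below (the exact `model_name` string is an assumption based on the model list above, not shown in this excerpt):
```python
result = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", model_name = "Facenet")
print("Is verified: ", result["verified"])
```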
@@ -66,6 +76,8 @@ Deepface also offers facial attribute analysis including [`age`](https://sefiks.
from deepface import DeepFace
demography = DeepFace.analyze("img4.jpg") #passing nothing as 2nd argument will find everything
#demography = DeepFace.analyze("img4.jpg", ['age', 'gender', 'race', 'emotion']) #identical to the line above
demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"])
```
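As with verification, passing a list to analyze returns one demography object per image; the bulk test script in this commit reads fields such as `age`, `gender` and `dominant_emotion` from each item:
```python
for demography in demographies:
    print(demography["age"], "years old", demography["dominant_emotion"], demography["gender"])
```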
<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/test-case-1.jpg" width="20%" height="20%"></p>


@@ -17,23 +17,19 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, distance as dst
-def verify(img1_path, img2_path
+def verify(img1_path, img2_path=''
    , model_name ='VGG-Face', distance_metric = 'cosine', plot = False):
    tic = time.time()
-    if os.path.isfile(img1_path) != True:
-        raise ValueError("Confirm that ",img1_path," exists")
+    if type(img1_path) == list:
+        bulkProcess = True
+        img_list = img1_path.copy()
+    else:
+        bulkProcess = False
+        img_list = [[img1_path, img2_path]]
-    if os.path.isfile(img2_path) != True:
-        raise ValueError("Confirm that ",img2_path," exists")
-    #-------------------------
-    #tuned thresholds for model and metric pair
-    threshold = functions.findThreshold(model_name, distance_metric)
-    #-------------------------
+    #------------------------------
    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend and", distance_metric,"distance.")
@@ -58,19 +54,39 @@ def verify(img1_path, img2_path
    else:
        raise ValueError("Invalid model_name passed - ", model_name)
-    #-------------------------
-    #crop face
+    #------------------------------
+    #tuned thresholds for model and metric pair
+    threshold = functions.findThreshold(model_name, distance_metric)
+    #------------------------------
+    resp_objects = []
+    for instance in img_list:
+        if type(instance) == list and len(instance) >= 2:
+            img1_path = instance[0]
+            img2_path = instance[1]
+            #----------------------
+            if os.path.isfile(img1_path) != True:
+                raise ValueError("Confirm that ",img1_path," exists")
+            if os.path.isfile(img2_path) != True:
+                raise ValueError("Confirm that ",img2_path," exists")
+            #----------------------
+            #crop and align faces
            img1 = functions.detectFace(img1_path, input_shape)
            img2 = functions.detectFace(img2_path, input_shape)
-    #-------------------------
+            #----------------------
            #find embeddings
            img1_representation = model.predict(img1)[0,:]
            img2_representation = model.predict(img2)[0,:]
-    #-------------------------
+            #----------------------
            #find distances between embeddings
            if distance_metric == 'cosine':
@@ -82,16 +98,14 @@ def verify(img1_path, img2_path
            else:
                raise ValueError("Invalid distance_metric passed - ", distance_metric)
-    #-------------------------
+            #----------------------
            #decision
            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"
-    #-------------------------
+            #----------------------
            if plot:
                label = "Verified: "+identified
                label += "\nThreshold: "+str(round(distance, 2))
@@ -109,9 +123,8 @@ def verify(img1_path, img2_path
                fig.suptitle(label, fontsize=17)
                plt.show(block=True)
-    #-------------------------
-    toc = time.time()
+            #----------------------
            #response object
            resp_obj = "{"
            resp_obj += "\"verified\": "+identified
@@ -123,16 +136,34 @@ def verify(img1_path, img2_path
            resp_obj = json.loads(resp_obj) #string to json
+            if bulkProcess == True:
+                resp_objects.append(resp_obj)
+            else:
+                return resp_obj
+            #----------------------
+        else:
+            raise ValueError("Invalid arguments passed to verify function: ", instance)
+    #-------------------------
+    toc = time.time()
    #print("identification lasts ",toc-tic," seconds")
-    return resp_obj
+    if bulkProcess == True:
+        return resp_objects

def analyze(img_path, actions= []):
-    if os.path.isfile(img_path) != True:
-        raise ValueError("Confirm that ",img_path," exists")
+    if type(img_path) == list:
+        img_paths = img_path.copy()
+        bulkProcess = True
+    else:
+        img_paths = [img_path]
+        bulkProcess = False
-    resp_obj = "{"
    #---------------------------------
    #if a specific target is not passed, then find them all
    if len(actions) == 0:
@@ -140,6 +171,32 @@ def analyze(img_path, actions= []):
print("Actions to do: ", actions)
#---------------------------------
if 'emotion' in actions:
emotion_model = Emotion.loadModel()
if 'age' in actions:
age_model = Age.loadModel()
if 'gender' in actions:
gender_model = Gender.loadModel()
if 'race' in actions:
race_model = Race.loadModel()
#---------------------------------
resp_objects = []
for img_path in img_paths:
if type(img_path) != str:
raise ValueError("You should pass string data type for image paths but you passed ", type(img_path))
if os.path.isfile(img_path) != True:
raise ValueError("Confirm that ",img_path," exists")
resp_obj = "{"
#TO-DO: do this in parallel
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
@@ -157,8 +214,7 @@ def analyze(img_path, actions= []):
                emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
                img = functions.detectFace(img_path, (48, 48), True)
-                model = Emotion.loadModel()
-                emotion_predictions = model.predict(img)[0,:]
+                emotion_predictions = emotion_model.predict(img)[0,:]
                sum_of_predictions = emotion_predictions.sum()
@@ -180,8 +236,7 @@ def analyze(img_path, actions= []):
            elif action == 'age':
                img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
                #print("age prediction")
-                model = Age.loadModel()
-                age_predictions = model.predict(img)[0,:]
+                age_predictions = age_model.predict(img)[0,:]
                apparent_age = Age.findApparentAge(age_predictions)
                resp_obj += "\"age\": %s" % (apparent_age)
@@ -190,8 +245,7 @@ def analyze(img_path, actions= []):
                img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
                #print("gender prediction")
-                model = Gender.loadModel()
-                gender_prediction = model.predict(img)[0,:]
+                gender_prediction = gender_model.predict(img)[0,:]
                if np.argmax(gender_prediction) == 0:
                    gender = "Woman"
@@ -202,8 +256,7 @@ def analyze(img_path, actions= []):
            elif action == 'race':
                img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
-                model = Race.loadModel()
-                race_predictions = model.predict(img)[0,:]
+                race_predictions = race_model.predict(img)[0,:]
                race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
                sum_of_predictions = race_predictions.sum()
@@ -228,8 +281,14 @@ def analyze(img_path, actions= []):
        resp_obj = json.loads(resp_obj)
+        if bulkProcess == True:
+            resp_objects.append(resp_obj)
+        else:
+            return resp_obj
+    if bulkProcess == True:
+        return resp_objects

def detectFace(img_path):
    img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
    return img[:, :, ::-1] #bgr to rgb
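The detectFace helper at the end of the file returns the cropped, aligned face as an RGB numpy array, so its output can be displayed directly. A minimal usage sketch (the matplotlib display is an illustration, not part of this commit):
```python
import matplotlib.pyplot as plt
from deepface import DeepFace

img = DeepFace.detectFace("dataset/img1.jpg")  # (224, 224, 3) RGB array of the detected face
plt.imshow(img)
plt.show()
```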


@@ -5,6 +5,43 @@ import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------
print("Bulk tests")
print("-----------------------------------------")
print("Bulk face recognition tests")
dataset = [
    ['dataset/img1.jpg', 'dataset/img2.jpg', True],
    ['dataset/img5.jpg', 'dataset/img6.jpg', True]
]
resp_obj = DeepFace.verify(dataset)
print(resp_obj[0]["verified"] == True)
print(resp_obj[1]["verified"] == True)
print("-----------------------------------------")
print("Bulk facial analysis tests")
dataset = [
    'dataset/img1.jpg',
    'dataset/img2.jpg',
    'dataset/img5.jpg',
    'dataset/img6.jpg'
]
resp_obj = DeepFace.analyze(dataset)
print(resp_obj[0]["age"]," years old ", resp_obj[0]["dominant_emotion"], " ",resp_obj[0]["gender"])
print(resp_obj[1]["age"]," years old ", resp_obj[1]["dominant_emotion"], " ",resp_obj[1]["gender"])
print(resp_obj[2]["age"]," years old ", resp_obj[2]["dominant_emotion"], " ",resp_obj[2]["gender"])
print(resp_obj[3]["age"]," years old ", resp_obj[3]["dominant_emotion"], " ",resp_obj[3]["gender"])
print("-----------------------------------------")
#-----------------------------------------
print("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg"