Merge pull request #17 from uriafranko/uria-franko

Pass already-built models to the analyze function as well
This commit is contained in:
Sefik Ilkin Serengil 2020-04-17 15:20:10 +03:00 committed by GitHub
commit fc70aa0aad
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 95 additions and 77 deletions

1
Contributors.md Normal file
View File

@ -0,0 +1 @@
uriafranko

View File

@ -20,73 +20,74 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst from deepface.commons import functions, realtime, distance as dst
def verify(img1_path, img2_path='' def verify(img1_path, img2_path=''
, model_name ='VGG-Face', distance_metric = 'cosine', model = None): , model_name ='VGG-Face', distance_metric = 'cosine', model = None):
tic = time.time() tic = time.time()
if type(img1_path) == list: if type(img1_path) == list:
bulkProcess = True bulkProcess = True
img_list = img1_path.copy() img_list = img1_path.copy()
else: else:
bulkProcess = False bulkProcess = False
img_list = [[img1_path, img2_path]] img_list = [[img1_path, img2_path]]
#------------------------------ #------------------------------
if model == None: if model == None:
if model_name == 'VGG-Face': if model_name == 'VGG-Face':
print("Using VGG-Face model backend and", distance_metric,"distance.") print("Using VGG-Face model backend and", distance_metric,"distance.")
model = VGGFace.loadModel() model = VGGFace.loadModel()
elif model_name == 'OpenFace': elif model_name == 'OpenFace':
print("Using OpenFace model backend", distance_metric,"distance.") print("Using OpenFace model backend", distance_metric,"distance.")
model = OpenFace.loadModel() model = OpenFace.loadModel()
elif model_name == 'Facenet': elif model_name == 'Facenet':
print("Using Facenet model backend", distance_metric,"distance.") print("Using Facenet model backend", distance_metric,"distance.")
model = Facenet.loadModel() model = Facenet.loadModel()
elif model_name == 'DeepFace': elif model_name == 'DeepFace':
print("Using FB DeepFace model backend", distance_metric,"distance.") print("Using FB DeepFace model backend", distance_metric,"distance.")
model = FbDeepFace.loadModel() model = FbDeepFace.loadModel()
else: else:
raise ValueError("Invalid model_name passed - ", model_name) raise ValueError("Invalid model_name passed - ", model_name)
else: #model != None else: #model != None
print("Already built model is passed") print("Already built model is passed")
#------------------------------ #------------------------------
#face recognition models have different size of inputs #face recognition models have different size of inputs
input_shape = model.layers[0].input_shape[1:3] input_shape = model.layers[0].input_shape[1:3]
#------------------------------ #------------------------------
#tuned thresholds for model and metric pair #tuned thresholds for model and metric pair
threshold = functions.findThreshold(model_name, distance_metric) threshold = functions.findThreshold(model_name, distance_metric)
#------------------------------ #------------------------------
resp_objects = [] resp_objects = []
for instance in img_list: for instance in img_list:
if type(instance) == list and len(instance) >= 2: if type(instance) == list and len(instance) >= 2:
img1_path = instance[0] img1_path = instance[0]
img2_path = instance[1] img2_path = instance[1]
#---------------------- #----------------------
#crop and align faces #crop and align faces
img1 = functions.detectFace(img1_path, input_shape) img1 = functions.detectFace(img1_path, input_shape)
img2 = functions.detectFace(img2_path, input_shape) img2 = functions.detectFace(img2_path, input_shape)
#---------------------- #----------------------
#find embeddings #find embeddings
img1_representation = model.predict(img1)[0,:] img1_representation = model.predict(img1)[0,:]
img2_representation = model.predict(img2)[0,:] img2_representation = model.predict(img2)[0,:]
#---------------------- #----------------------
#find distances between embeddings #find distances between embeddings
if distance_metric == 'cosine': if distance_metric == 'cosine':
distance = dst.findCosineDistance(img1_representation, img2_representation) distance = dst.findCosineDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean': elif distance_metric == 'euclidean':
@ -95,18 +96,18 @@ def verify(img1_path, img2_path=''
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation)) distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
else: else:
raise ValueError("Invalid distance_metric passed - ", distance_metric) raise ValueError("Invalid distance_metric passed - ", distance_metric)
#---------------------- #----------------------
#decision #decision
if distance <= threshold: if distance <= threshold:
identified = "true" identified = "true"
else: else:
identified = "false" identified = "false"
#---------------------- #----------------------
#response object #response object
resp_obj = "{" resp_obj = "{"
resp_obj += "\"verified\": "+identified resp_obj += "\"verified\": "+identified
resp_obj += ", \"distance\": "+str(distance) resp_obj += ", \"distance\": "+str(distance)
@ -114,30 +115,30 @@ def verify(img1_path, img2_path=''
resp_obj += ", \"model\": \""+model_name+"\"" resp_obj += ", \"model\": \""+model_name+"\""
resp_obj += ", \"similarity_metric\": \""+distance_metric+"\"" resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
resp_obj += "}" resp_obj += "}"
resp_obj = json.loads(resp_obj) #string to json resp_obj = json.loads(resp_obj) #string to json
if bulkProcess == True: if bulkProcess == True:
resp_objects.append(resp_obj) resp_objects.append(resp_obj)
else: else:
K.clear_session() K.clear_session()
return resp_obj return resp_obj
#---------------------- #----------------------
else: else:
raise ValueError("Invalid arguments passed to verify function: ", instance) raise ValueError("Invalid arguments passed to verify function: ", instance)
#------------------------- #-------------------------
toc = time.time() toc = time.time()
#print("identification lasts ",toc-tic," seconds") #print("identification lasts ",toc-tic," seconds")
if bulkProcess == True: if bulkProcess == True:
resp_obj = "{" resp_obj = "{"
for i in range(0, len(resp_objects)): for i in range(0, len(resp_objects)):
resp_item = json.dumps(resp_objects[i]) resp_item = json.dumps(resp_objects[i])
if i > 0: if i > 0:
resp_obj += ", " resp_obj += ", "
@ -148,7 +149,8 @@ def verify(img1_path, img2_path=''
return resp_obj return resp_obj
#return resp_objects #return resp_objects
def analyze(img_path, actions= []):
def analyze(img_path, actions= [], models= {}):
if type(img_path) == list: if type(img_path) == list:
img_paths = img_path.copy() img_paths = img_path.copy()
@ -156,129 +158,141 @@ def analyze(img_path, actions= []):
else: else:
img_paths = [img_path] img_paths = [img_path]
bulkProcess = False bulkProcess = False
#--------------------------------- #---------------------------------
#if a specific target is not passed, then find them all #if a specific target is not passed, then find them all
if len(actions) == 0: if len(actions) == 0:
actions= ['emotion', 'age', 'gender', 'race'] actions= ['emotion', 'age', 'gender', 'race']
print("Actions to do: ", actions) print("Actions to do: ", actions)
#--------------------------------- #---------------------------------
if 'emotion' in actions: if 'emotion' in actions:
emotion_model = Emotion.loadModel() if 'emotion' in models:
emotion_model = models['emotion']
else:
emotion_model = Emotion.loadModel()
if 'age' in actions: if 'age' in actions:
age_model = Age.loadModel() if 'age' in models:
age_model = models['age']
else:
age_model = Age.loadModel()
if 'gender' in actions: if 'gender' in actions:
gender_model = Gender.loadModel() if 'gender' in models:
gender_model = models['gender']
else:
gender_model = Gender.loadModel()
if 'race' in actions: if 'race' in actions:
race_model = Race.loadModel() if 'race' in models:
race_model = models['race']
else:
race_model = Race.loadModel()
#--------------------------------- #---------------------------------
resp_objects = [] resp_objects = []
for img_path in img_paths: for img_path in img_paths:
resp_obj = "{" resp_obj = "{"
#TO-DO: do this in parallel #TO-DO: do this in parallel
pbar = tqdm(range(0,len(actions)), desc='Finding actions') pbar = tqdm(range(0,len(actions)), desc='Finding actions')
action_idx = 0 action_idx = 0
#for action in actions: #for action in actions:
for index in pbar: for index in pbar:
action = actions[index] action = actions[index]
pbar.set_description("Action: %s" % (action)) pbar.set_description("Action: %s" % (action))
if action_idx > 0: if action_idx > 0:
resp_obj += ", " resp_obj += ", "
if action == 'emotion': if action == 'emotion':
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'] emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
img = functions.detectFace(img_path, (48, 48), True) img = functions.detectFace(img_path, (48, 48), True)
emotion_predictions = emotion_model.predict(img)[0,:] emotion_predictions = emotion_model.predict(img)[0,:]
sum_of_predictions = emotion_predictions.sum() sum_of_predictions = emotion_predictions.sum()
emotion_obj = "\"emotion\": {" emotion_obj = "\"emotion\": {"
for i in range(0, len(emotion_labels)): for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i] emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
if i > 0: emotion_obj += ", " if i > 0: emotion_obj += ", "
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction) emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
emotion_obj += "}" emotion_obj += "}"
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)]) emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
resp_obj += emotion_obj resp_obj += emotion_obj
elif action == 'age': elif action == 'age':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("age prediction") #print("age prediction")
age_predictions = age_model.predict(img)[0,:] age_predictions = age_model.predict(img)[0,:]
apparent_age = Age.findApparentAge(age_predictions) apparent_age = Age.findApparentAge(age_predictions)
resp_obj += "\"age\": %s" % (apparent_age) resp_obj += "\"age\": %s" % (apparent_age)
elif action == 'gender': elif action == 'gender':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
#print("gender prediction") #print("gender prediction")
gender_prediction = gender_model.predict(img)[0,:] gender_prediction = gender_model.predict(img)[0,:]
if np.argmax(gender_prediction) == 0: if np.argmax(gender_prediction) == 0:
gender = "Woman" gender = "Woman"
elif np.argmax(gender_prediction) == 1: elif np.argmax(gender_prediction) == 1:
gender = "Man" gender = "Man"
resp_obj += "\"gender\": \"%s\"" % (gender) resp_obj += "\"gender\": \"%s\"" % (gender)
elif action == 'race': elif action == 'race':
img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images img = functions.detectFace(img_path, (224, 224), False) #just emotion model expects grayscale images
race_predictions = race_model.predict(img)[0,:] race_predictions = race_model.predict(img)[0,:]
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic'] race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
sum_of_predictions = race_predictions.sum() sum_of_predictions = race_predictions.sum()
race_obj = "\"race\": {" race_obj = "\"race\": {"
for i in range(0, len(race_labels)): for i in range(0, len(race_labels)):
race_label = race_labels[i] race_label = race_labels[i]
race_prediction = 100 * race_predictions[i] / sum_of_predictions race_prediction = 100 * race_predictions[i] / sum_of_predictions
if i > 0: race_obj += ", " if i > 0: race_obj += ", "
race_obj += "\"%s\": %s" % (race_label, race_prediction) race_obj += "\"%s\": %s" % (race_label, race_prediction)
race_obj += "}" race_obj += "}"
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)]) race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
resp_obj += race_obj resp_obj += race_obj
action_idx = action_idx + 1 action_idx = action_idx + 1
resp_obj += "}" resp_obj += "}"
resp_obj = json.loads(resp_obj) resp_obj = json.loads(resp_obj)
if bulkProcess == True: if bulkProcess == True:
resp_objects.append(resp_obj) resp_objects.append(resp_obj)
else: else:
return resp_obj return resp_obj
if bulkProcess == True: if bulkProcess == True:
resp_obj = "{" resp_obj = "{"
for i in range(0, len(resp_objects)): for i in range(0, len(resp_objects)):
resp_item = json.dumps(resp_objects[i]) resp_item = json.dumps(resp_objects[i])
if i > 0: if i > 0:
resp_obj += ", " resp_obj += ", "
@ -289,13 +303,16 @@ def analyze(img_path, actions= []):
return resp_obj return resp_obj
#return resp_objects #return resp_objects
def detectFace(img_path):
	"""Detect and align the face found in img_path and return it as an RGB image.

	functions.detectFace returns a batch-shaped BGR array of (1, 224, 224, 3);
	we take the single item and reverse the channel axis to convert BGR -> RGB,
	which is the order most display/plotting libraries expect.

	Parameters:
		img_path: path to an image file (or an image the commons helper accepts).

	Returns:
		A (224, 224, 3) RGB image array of the detected, aligned face.
	"""
	img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
	return img[:, :, ::-1] #bgr to rgb
def stream(db_path, model_name ='VGG-Face', distance_metric = 'cosine', enable_face_analysis = True):
	"""Run real-time face recognition on a webcam stream.

	Thin wrapper that forwards to realtime.analysis.

	Parameters:
		db_path: folder holding the identity images to recognize against.
		model_name: face recognition backend (e.g. 'VGG-Face') — presumably the
			same choices verify() accepts; confirm against realtime.analysis.
		distance_metric: similarity metric name (e.g. 'cosine').
		enable_face_analysis: when True, also run facial attribute analysis
			(age/gender/emotion overlay) on detected faces.
	"""
	realtime.analysis(db_path, model_name, distance_metric, enable_face_analysis)
#---------------------------

#module-level side effect: ensure the required folder structure
#(weights/config directories used by the commons helpers) exists on import
functions.initializeFolder()