version 0.0.52

This commit is contained in:
Sefik Ilkin Serengil 2021-05-27 22:41:51 +03:00
parent 3cd55fcf00
commit 9af7c33ff7
6 changed files with 198 additions and 94 deletions

View File

@ -42,7 +42,7 @@ print("Loading Face Recognition Models...")
pbar = tqdm(range(0, 6), desc='Loading Face Recognition Models...')
for index in pbar:
if index == 0:
pbar.set_description("Loading VGG-Face")
vggface_model = DeepFace.build_model("VGG-Face")
@ -61,7 +61,7 @@ for index in pbar:
elif index == 5:
pbar.set_description("Loading ArcFace DeepFace")
arcface_model = DeepFace.build_model("ArcFace")
toc = time.time()
print("Face recognition models are built in ", toc-tic," seconds")
@ -112,21 +112,21 @@ def index():
@app.route('/analyze', methods=['POST'])
def analyze():
global graph
tic = time.time()
req = request.get_json()
trx_id = uuid.uuid4()
#---------------------------
if tf_version == 1:
with graph.as_default():
resp_obj = analyzeWrapper(req, trx_id)
elif tf_version == 2:
resp_obj = analyzeWrapper(req, trx_id)
#---------------------------
toc = time.time()
@ -145,10 +145,10 @@ def analyzeWrapper(req, trx_id = 0):
for item in raw_content: #item is in type of dict
instances.append(item)
if len(instances) == 0:
return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
print("Analyzing ", len(instances)," instances")
#---------------------------
@ -156,52 +156,52 @@ def analyzeWrapper(req, trx_id = 0):
actions= ['emotion', 'age', 'gender', 'race']
if "actions" in list(req.keys()):
actions = req["actions"]
#---------------------------
#resp_obj = DeepFace.analyze(instances, actions=actions)
resp_obj = DeepFace.analyze(instances, actions=actions, models=facial_attribute_models)
return resp_obj
@app.route('/verify', methods=['POST'])
def verify():
	"""POST /verify endpoint.

	Parses the JSON payload, delegates to verifyWrapper and decorates a
	successful response with the transaction id and elapsed seconds.
	"""
	global graph

	tic = time.time()
	req = request.get_json()
	trx_id = uuid.uuid4()

	resp_obj = jsonify({'success': False})

	# TF 1.x requires the stored default graph inside the request thread
	if tf_version == 1:
		with graph.as_default():
			resp_obj = verifyWrapper(req, trx_id)
	elif tf_version == 2:
		resp_obj = verifyWrapper(req, trx_id)

	toc = time.time()

	# bug fix: verifyWrapper's validation failures return a
	# (response, status) tuple; item assignment on it raised TypeError.
	# Forward error responses untouched.
	if isinstance(resp_obj, tuple):
		return resp_obj
	if not isinstance(resp_obj, dict):
		# fallback Response object (unsupported tf version) — no metadata to add
		return resp_obj, 200

	resp_obj["trx_id"] = trx_id
	resp_obj["seconds"] = toc - tic

	return resp_obj, 200
def verifyWrapper(req, trx_id = 0):
resp_obj = jsonify({'success': False})
model_name = "VGG-Face"; distance_metric = "cosine"
if "model_name" in list(req.keys()):
model_name = req["model_name"]
if "distance_metric" in list(req.keys()):
distance_metric = req["distance_metric"]
#----------------------
instances = []
if "img" in list(req.keys()):
raw_content = req["img"] #list
@ -213,7 +213,7 @@ def verifyWrapper(req, trx_id = 0):
validate_img1 = False
if len(img1) > 11 and img1[0:11] == "data:image/":
validate_img1 = True
validate_img2 = False
if len(img2) > 11 and img2[0:11] == "data:image/":
validate_img2 = True
@ -223,16 +223,16 @@ def verifyWrapper(req, trx_id = 0):
instance.append(img1); instance.append(img2)
instances.append(instance)
#--------------------------
if len(instances) == 0:
return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
print("Input request of ", trx_id, " has ",len(instances)," pairs to verify")
#--------------------------
if model_name == "VGG-Face":
resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = vggface_model)
elif model_name == "Facenet":
@ -252,13 +252,93 @@ def verifyWrapper(req, trx_id = 0):
models["OpenFace"] = openface_model
models["DeepFace"] = deepface_model
resp_obj = DeepFace.verify(instances, model_name = model_name, model = models)
for key in resp_obj: #issue 198.
resp_obj[key]['verified'] = bool(resp_obj[key]['verified'])
else:
resp_obj = jsonify({'success': False, 'error': 'You must pass a valid model name. You passed %s' % (model_name)}), 205
return resp_obj
@app.route('/represent', methods=['POST'])
def represent():
	"""POST /represent endpoint.

	Parses the JSON payload, delegates to representWrapper and decorates
	a successful response with the transaction id and elapsed seconds.
	"""
	global graph

	tic = time.time()
	req = request.get_json()
	trx_id = uuid.uuid4()

	resp_obj = jsonify({'success': False})

	# TF 1.x requires the stored default graph inside the request thread
	if tf_version == 1:
		with graph.as_default():
			resp_obj = representWrapper(req, trx_id)
	elif tf_version == 2:
		resp_obj = representWrapper(req, trx_id)

	toc = time.time()

	# bug fix: representWrapper's validation failures return a
	# (response, status) tuple; item assignment on it raised TypeError.
	# Forward error responses untouched.
	if isinstance(resp_obj, tuple):
		return resp_obj
	if not isinstance(resp_obj, dict):
		# fallback Response object (unsupported tf version) — no metadata to add
		return resp_obj, 200

	resp_obj["trx_id"] = trx_id
	resp_obj["seconds"] = toc - tic

	return resp_obj, 200
def representWrapper(req, trx_id = 0):
	"""Extract a facial embedding for a base64 encoded image.

	req: request payload dict; expects 'img' (a "data:image/..." base64
	data-URI string) and an optional 'model_name' (default VGG-Face).
	trx_id: transaction id, carried for tracing only.

	Returns {'embedding': [...]} on success, or a (jsonify(...), 205)
	tuple when validation fails.
	"""

	#-------------------------------------
	#find out model

	model_name = "VGG-Face"
	if "model_name" in list(req.keys()):
		model_name = req["model_name"]

	#-------------------------------------
	#retrieve image from request

	img = ""
	if "img" in list(req.keys()):
		img = req["img"]

	# only base64 data-URI encoded images are accepted
	validate_img = len(img) > 11 and img[0:11] == "data:image/"

	if not validate_img:
		print("invalid image passed!")
		return jsonify({'success': False, 'error': 'you must pass img as base64 encoded string'}), 205

	#-------------------------------------
	#call represent function from the interface

	if model_name == "VGG-Face":
		embedding = DeepFace.represent(img, model_name = model_name, model = vggface_model)
	elif model_name == "Facenet":
		embedding = DeepFace.represent(img, model_name = model_name, model = facenet_model)
	elif model_name == "OpenFace":
		embedding = DeepFace.represent(img, model_name = model_name, model = openface_model)
	elif model_name == "DeepFace":
		embedding = DeepFace.represent(img, model_name = model_name, model = deepface_model)
	elif model_name == "DeepID":
		embedding = DeepFace.represent(img, model_name = model_name, model = deepid_model)
	elif model_name == "ArcFace":
		embedding = DeepFace.represent(img, model_name = model_name, model = arcface_model)
	else:
		# bug fix: this error used to be assigned and then unconditionally
		# overwritten below, so callers silently got an empty embedding
		return jsonify({'success': False, 'error': 'You must pass a valid model name. You passed %s' % (model_name)}), 205

	#-------------------------------------

	resp_obj = {}
	resp_obj["embedding"] = embedding

	#-------------------------------------

	return resp_obj
if __name__ == '__main__':

File diff suppressed because one or more lines are too long

View File

@ -10,48 +10,47 @@ tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
import keras
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
def loadModel(url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'):
	"""Build the apparent-age prediction model on top of the VGG-Face base.

	url: source of the pre-trained weights; defaults to the official
	release, so existing callers are unaffected.
	"""

	model = VGGFace.baseModel()

	#--------------------------

	classes = 101  # one softmax output per apparent age 0..100
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
	base_model_output = Flatten()(base_model_output)
	base_model_output = Activation('softmax')(base_model_output)

	#--------------------------

	age_model = Model(inputs=model.input, outputs=base_model_output)

	#--------------------------

	#load weights

	home = str(Path.home())

	if os.path.isfile(home+'/.deepface/weights/age_model_weights.h5') != True:
		print("age_model_weights.h5 will be downloaded...")

		# bug fix: the url parameter was previously shadowed by a
		# hard-coded reassignment here, making it impossible to override
		output = home+'/.deepface/weights/age_model_weights.h5'
		gdown.download(url, output, quiet=False)

	age_model.load_weights(home+'/.deepface/weights/age_model_weights.h5')

	return age_model
#--------------------------
def findApparentAge(age_predictions):
	"""Return the expectation of the predicted age distribution.

	age_predictions: array-like of 101 softmax probabilities covering the
	apparent ages 0..100. The apparent age is the probability-weighted
	mean of those ages.
	"""
	# np.arange replaces the list-comprehension-backed np.array; the
	# stray duplicated return line (diff residue) is removed
	output_indexes = np.arange(0, 101)
	apparent_age = np.sum(age_predictions * output_indexes)
	return apparent_age

View File

@ -14,11 +14,11 @@ elif tf_version == 2:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
def loadModel():
def loadModel(url = 'https://drive.google.com/uc?id=13iUHHP3SlNg53qSuQZDdHDSDNdBP9nwy'):
num_classes = 7
model = Sequential()
#1st convolution layer
@ -44,29 +44,24 @@ def loadModel():
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
#----------------------------
home = str(Path.home())
if os.path.isfile(home+'/.deepface/weights/facial_expression_model_weights.h5') != True:
print("facial_expression_model_weights.h5 will be downloaded...")
#TO-DO: upload weights to google drive
#zip
url = 'https://drive.google.com/uc?id=13iUHHP3SlNg53qSuQZDdHDSDNdBP9nwy'
output = home+'/.deepface/weights/facial_expression_model_weights.zip'
gdown.download(url, output, quiet=False)
#unzip facial_expression_model_weights.zip
with zipfile.ZipFile(output, 'r') as zip_ref:
zip_ref.extractall(home+'/.deepface/weights/')
model.load_weights(home+'/.deepface/weights/facial_expression_model_weights.h5')
return model
#----------------------------
return 0

View File

@ -14,37 +14,36 @@ elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
def loadModel(url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'):
	"""Build the gender classification model on top of the VGG-Face base.

	url: source of the pre-trained weights; defaults to the official
	release, so existing callers are unaffected.
	"""

	model = VGGFace.baseModel()

	#--------------------------

	classes = 2  # woman / man softmax outputs
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
	base_model_output = Flatten()(base_model_output)
	base_model_output = Activation('softmax')(base_model_output)

	#--------------------------

	gender_model = Model(inputs=model.input, outputs=base_model_output)

	#--------------------------

	#load weights

	home = str(Path.home())

	if os.path.isfile(home+'/.deepface/weights/gender_model_weights.h5') != True:
		print("gender_model_weights.h5 will be downloaded...")

		# bug fix: the url parameter was previously shadowed by a
		# hard-coded reassignment here, making it impossible to override
		output = home+'/.deepface/weights/gender_model_weights.h5'
		gdown.download(url, output, quiet=False)

	gender_model.load_weights(home+'/.deepface/weights/gender_model_weights.h5')

	return gender_model

View File

@ -16,42 +16,41 @@ elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
def loadModel(url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'):
	"""Build the race/ethnicity classification model on the VGG-Face base.

	url: source of the pre-trained weights archive; defaults to the
	official release, so existing callers are unaffected.
	"""

	model = VGGFace.baseModel()

	#--------------------------

	classes = 6  # six race/ethnicity softmax outputs
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
	base_model_output = Flatten()(base_model_output)
	base_model_output = Activation('softmax')(base_model_output)

	#--------------------------

	race_model = Model(inputs=model.input, outputs=base_model_output)

	#--------------------------

	#load weights

	home = str(Path.home())

	if os.path.isfile(home+'/.deepface/weights/race_model_single_batch.h5') != True:
		print("race_model_single_batch.h5 will be downloaded...")

		# bug fix: the url parameter was previously shadowed by a
		# hard-coded reassignment here, making it impossible to override
		# (the weights are distributed as a zip archive)
		output = home+'/.deepface/weights/race_model_single_batch.zip'
		gdown.download(url, output, quiet=False)

		#unzip race_model_single_batch.zip
		with zipfile.ZipFile(output, 'r') as zip_ref:
			zip_ref.extractall(home+'/.deepface/weights/')

	race_model.load_weights(home+'/.deepface/weights/race_model_single_batch.h5')

	return race_model