mirror of https://github.com/serengil/deepface.git
synced 2025-06-07 03:55:21 +00:00

version 0.0.52

This commit is contained in:
parent 3cd55fcf00
commit 9af7c33ff7

api/api.py | 140
@@ -42,7 +42,7 @@ print("Loading Face Recognition Models...")
pbar = tqdm(range(0, 6), desc='Loading Face Recognition Models...')

for index in pbar:

    if index == 0:
        pbar.set_description("Loading VGG-Face")
        vggface_model = DeepFace.build_model("VGG-Face")
@@ -61,7 +61,7 @@ for index in pbar:
    elif index == 5:
        pbar.set_description("Loading ArcFace DeepFace")
        arcface_model = DeepFace.build_model("ArcFace")

toc = time.time()

print("Face recognition models are built in ", toc-tic," seconds")
@@ -112,21 +112,21 @@ def index():

@app.route('/analyze', methods=['POST'])
def analyze():

    global graph

    tic = time.time()
    req = request.get_json()
    trx_id = uuid.uuid4()

    #---------------------------

    if tf_version == 1:
        with graph.as_default():
            resp_obj = analyzeWrapper(req, trx_id)
    elif tf_version == 2:
        resp_obj = analyzeWrapper(req, trx_id)

    #---------------------------

    toc = time.time()
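For TensorFlow 1.x the wrapper call runs inside a graph context. The graph object itself is presumably captured once at service start-up, outside the hunks shown here; a minimal sketch of that usual Keras-on-Flask pattern, under that assumption:

    import tensorflow as tf

    tf_version = int(tf.__version__.split(".")[0])

    # assumption: only needed on tf 1.x, where models built at start-up must be
    # reused from the same default graph in every Flask request thread
    if tf_version == 1:
        graph = tf.get_default_graph()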
@@ -145,10 +145,10 @@ def analyzeWrapper(req, trx_id = 0):

        for item in raw_content: #item is in type of dict
            instances.append(item)

    if len(instances) == 0:
        return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205

    print("Analyzing ", len(instances)," instances")

    #---------------------------
@@ -156,52 +156,52 @@ def analyzeWrapper(req, trx_id = 0):
    actions= ['emotion', 'age', 'gender', 'race']
    if "actions" in list(req.keys()):
        actions = req["actions"]

    #---------------------------

    #resp_obj = DeepFace.analyze(instances, actions=actions)
    resp_obj = DeepFace.analyze(instances, actions=actions, models=facial_attribute_models)

    return resp_obj

@app.route('/verify', methods=['POST'])
def verify():

    global graph

    tic = time.time()
    req = request.get_json()
    trx_id = uuid.uuid4()

    resp_obj = jsonify({'success': False})

    if tf_version == 1:
        with graph.as_default():
            resp_obj = verifyWrapper(req, trx_id)
    elif tf_version == 2:
        resp_obj = verifyWrapper(req, trx_id)

    #--------------------------

    toc = time.time()

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = toc-tic

    return resp_obj, 200

def verifyWrapper(req, trx_id = 0):

    resp_obj = jsonify({'success': False})

    model_name = "VGG-Face"; distance_metric = "cosine"
    if "model_name" in list(req.keys()):
        model_name = req["model_name"]
    if "distance_metric" in list(req.keys()):
        distance_metric = req["distance_metric"]

    #----------------------

    instances = []
    if "img" in list(req.keys()):
        raw_content = req["img"] #list
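Read together with the hunks above, /analyze expects a JSON body whose "img" field is a list of base64-encoded images (data URIs) and whose optional "actions" field narrows the attributes to predict. A minimal client-side sketch; the host, port and file name are illustrative assumptions, not part of this commit:

    import base64
    import requests

    # assumption: the Flask app is reachable locally on its default port 5000
    with open("face.jpg", "rb") as f:
        img_b64 = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

    payload = {
        "img": [img_b64],                 # one or more images
        "actions": ["emotion", "age"],    # optional; defaults to emotion, age, gender, race
    }
    resp = requests.post("http://localhost:5000/analyze", json=payload)
    print(resp.json())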
@@ -213,7 +213,7 @@ def verifyWrapper(req, trx_id = 0):
            validate_img1 = False
            if len(img1) > 11 and img1[0:11] == "data:image/":
                validate_img1 = True

            validate_img2 = False
            if len(img2) > 11 and img2[0:11] == "data:image/":
                validate_img2 = True
@@ -223,16 +223,16 @@ def verifyWrapper(req, trx_id = 0):

            instance.append(img1); instance.append(img2)
            instances.append(instance)

    #--------------------------

    if len(instances) == 0:
        return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205

    print("Input request of ", trx_id, " has ",len(instances)," pairs to verify")

    #--------------------------

    if model_name == "VGG-Face":
        resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = vggface_model)
    elif model_name == "Facenet":
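verifyWrapper therefore expects "img" to be a list of image pairs, each image a base64 data URI (the hunks above only check for the "data:image/" prefix). A hedged client-side sketch; the per-pair "img1"/"img2" keys are assumed from code outside these hunks, and the host, port and file names are illustrative:

    import base64
    import requests

    def to_data_uri(path):
        # assumption: JPEG input; only the "data:image/" prefix is validated server-side
        with open(path, "rb") as f:
            return "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

    payload = {
        "model_name": "VGG-Face",        # optional, defaults to VGG-Face
        "distance_metric": "cosine",     # optional, defaults to cosine
        "img": [
            {"img1": to_data_uri("person_1.jpg"), "img2": to_data_uri("person_2.jpg")},
        ],
    }
    resp = requests.post("http://localhost:5000/verify", json=payload)
    print(resp.json())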
@@ -252,13 +252,93 @@ def verifyWrapper(req, trx_id = 0):
        models["OpenFace"] = openface_model
        models["DeepFace"] = deepface_model
        resp_obj = DeepFace.verify(instances, model_name = model_name, model = models)

        for key in resp_obj: #issue 198.
            resp_obj[key]['verified'] = bool(resp_obj[key]['verified'])

    else:
        resp_obj = jsonify({'success': False, 'error': 'You must pass a valid model name. You passed %s' % (model_name)}), 205

+    return resp_obj
+
+@app.route('/represent', methods=['POST'])
+def represent():
+
+    global graph
+
+    tic = time.time()
+    req = request.get_json()
+    trx_id = uuid.uuid4()
+
+    resp_obj = jsonify({'success': False})
+
+    if tf_version == 1:
+        with graph.as_default():
+            resp_obj = representWrapper(req, trx_id)
+    elif tf_version == 2:
+        resp_obj = representWrapper(req, trx_id)
+
+    #--------------------------
+
+    toc = time.time()
+
+    resp_obj["trx_id"] = trx_id
+    resp_obj["seconds"] = toc-tic
+
+    return resp_obj, 200
+
+def representWrapper(req, trx_id = 0):
+
+    resp_obj = jsonify({'success': False})
+
+    #-------------------------------------
+    #find out model
+
+    model_name = "VGG-Face"; distance_metric = "cosine"
+
+    if "model_name" in list(req.keys()):
+        model_name = req["model_name"]
+
+    #-------------------------------------
+    #retrieve images from request
+
+    img = ""
+    if "img" in list(req.keys()):
+        img = req["img"] #list
+        #print("img: ", img)
+
+    validate_img = False
+    if len(img) > 11 and img[0:11] == "data:image/":
+        validate_img = True
+
+    if validate_img != True:
+        print("invalid image passed!")
+        return jsonify({'success': False, 'error': 'you must pass img as base64 encoded string'}), 205
+
+    #-------------------------------------
+    #cal represent function from the interface
+
+    embedding = []
+    if model_name == "VGG-Face":
+        embedding = DeepFace.represent(img, model_name = model_name, model = vggface_model)
+    elif model_name == "Facenet":
+        embedding = DeepFace.represent(img, model_name = model_name, model = facenet_model)
+    elif model_name == "OpenFace":
+        embedding = DeepFace.represent(img, model_name = model_name, model = openface_model)
+    elif model_name == "DeepFace":
+        embedding = DeepFace.represent(img, model_name = model_name, model = deepface_model)
+    elif model_name == "DeepID":
+        embedding = DeepFace.represent(img, model_name = model_name, model = deepid_model)
+    elif model_name == "ArcFace":
+        embedding = DeepFace.represent(img, model_name = model_name, model = arcface_model)
+    else:
+        resp_obj = jsonify({'success': False, 'error': 'You must pass a valid model name. You passed %s' % (model_name)}), 205
+
+    #print("embedding is ", len(embedding)," dimensional vector")
+    resp_obj = {}
+    resp_obj["embedding"] = embedding
+    #-------------------------------------
+
    return resp_obj

if __name__ == '__main__':
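The new /represent endpoint added above takes a single base64 image plus an optional model_name and returns the embedding vector together with trx_id and seconds. A minimal usage sketch, assuming a local deployment on Flask's default port 5000 (host, port and file name are not part of this commit):

    import base64
    import requests

    with open("face.jpg", "rb") as f:  # illustrative file name
        img_b64 = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

    payload = {"model_name": "Facenet", "img": img_b64}
    resp = requests.post("http://localhost:5000/represent", json=payload)
    print(len(resp.json()["embedding"]))  # e.g. 128 dimensions for Facenet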
File diff suppressed because one or more lines are too long
@@ -10,48 +10,47 @@ tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
    import keras
    from keras.models import Model, Sequential
    from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
    from tensorflow import keras
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

-def loadModel():
+def loadModel(url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'):

    model = VGGFace.baseModel()

    #--------------------------

    classes = 101
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    age_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home+'/.deepface/weights/age_model_weights.h5') != True:
        print("age_model_weights.h5 will be downloaded...")

-        url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'
        output = home+'/.deepface/weights/age_model_weights.h5'
        gdown.download(url, output, quiet=False)

    age_model.load_weights(home+'/.deepface/weights/age_model_weights.h5')

    return age_model

#--------------------------

def findApparentAge(age_predictions):
    output_indexes = np.array([i for i in range(0, 101)])
    apparent_age = np.sum(age_predictions * output_indexes)
    return apparent_age
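The only functional change in this and the following attribute-model loaders is that the Google Drive weight URL moves from a hard-coded local variable into a loadModel(url=...) default argument, so the download source can be overridden without editing the module. A short sketch; the module path and the alternative URL are assumptions for illustration:

    from deepface.extendedmodels import Age  # assumed module path for this loader

    # default: download age_model_weights.h5 from the bundled Google Drive id
    age_model = Age.loadModel()

    # override: fetch the same weights from a self-hosted mirror instead
    age_model = Age.loadModel(url="https://example.com/weights/age_model_weights.h5")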
@@ -14,11 +14,11 @@ elif tf_version == 2:
    from tensorflow import keras
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout

-def loadModel():
+def loadModel(url = 'https://drive.google.com/uc?id=13iUHHP3SlNg53qSuQZDdHDSDNdBP9nwy'):

    num_classes = 7

    model = Sequential()

    #1st convolution layer
@@ -44,29 +44,24 @@ def loadModel():
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation='softmax'))

    #----------------------------

    home = str(Path.home())

    if os.path.isfile(home+'/.deepface/weights/facial_expression_model_weights.h5') != True:
        print("facial_expression_model_weights.h5 will be downloaded...")

        #TO-DO: upload weights to google drive

        #zip
-        url = 'https://drive.google.com/uc?id=13iUHHP3SlNg53qSuQZDdHDSDNdBP9nwy'
        output = home+'/.deepface/weights/facial_expression_model_weights.zip'
        gdown.download(url, output, quiet=False)

        #unzip facial_expression_model_weights.zip
        with zipfile.ZipFile(output, 'r') as zip_ref:
            zip_ref.extractall(home+'/.deepface/weights/')

    model.load_weights(home+'/.deepface/weights/facial_expression_model_weights.h5')

    return model
-
-    #----------------------------
-
-    return 0
@@ -14,37 +14,36 @@ elif tf_version == 2:
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

-def loadModel():
+def loadModel(url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'):

    model = VGGFace.baseModel()

    #--------------------------

    classes = 2
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    gender_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home+'/.deepface/weights/gender_model_weights.h5') != True:
        print("gender_model_weights.h5 will be downloaded...")

-        url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'
        output = home+'/.deepface/weights/gender_model_weights.h5'
        gdown.download(url, output, quiet=False)

    gender_model.load_weights(home+'/.deepface/weights/gender_model_weights.h5')

    return gender_model

#--------------------------
@@ -16,42 +16,41 @@ elif tf_version == 2:
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

-def loadModel():
+def loadModel(url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'):

    model = VGGFace.baseModel()

    #--------------------------

    classes = 6
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    race_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home+'/.deepface/weights/race_model_single_batch.h5') != True:
        print("race_model_single_batch.h5 will be downloaded...")

        #zip
-        url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'
        output = home+'/.deepface/weights/race_model_single_batch.zip'
        gdown.download(url, output, quiet=False)

        #unzip race_model_single_batch.zip
        with zipfile.ZipFile(output, 'r') as zip_ref:
            zip_ref.extractall(home+'/.deepface/weights/')

    race_model.load_weights(home+'/.deepface/weights/race_model_single_batch.h5')

    return race_model

#--------------------------