diff --git a/api/app.py b/api/app.py
deleted file mode 100644
index 293859b..0000000
--- a/api/app.py
+++ /dev/null
@@ -1,121 +0,0 @@
-from flask import Flask, jsonify, request, make_response
-
-import uuid
-import json
-import time
-
-from deepface import DeepFace
-
-#------------------------------
-
-app = Flask(__name__)
-
-#------------------------------
-#Service API Interface
-
-@app.route('/')
-def index():
-	return '<h1>Hello, world!</h1>'
-
-@app.route('/analyze', methods=['POST'])
-def analyze():
-
- req = request.get_json()
- trx_id = uuid.uuid4()
-
- #---------------------------
-
- tic = time.time()
-
- instances = []
- if "img" in list(req.keys()):
- raw_content = req["img"] #list
-
- for item in raw_content: #item is in type of dict
- instances.append(item)
-
- if len(instances) == 0:
- return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
-
- print("Analyzing ", len(instances)," instances")
-
- #---------------------------
-
- actions= ['emotion', 'age', 'gender', 'race']
- if "actions" in list(req.keys()):
- actions = req["actions"]
-
- #---------------------------
-
- resp_obj = DeepFace.analyze(instances, actions=actions)
-
- #---------------------------
-
- toc = time.time()
-
- resp_obj["trx_id"] = trx_id
- resp_obj["seconds"] = toc-tic
-
- return resp_obj
-
-@app.route('/verify', methods=['POST'])
-
-def verify():
-
- req = request.get_json()
- trx_id = uuid.uuid4()
-
- tic = time.time()
-
- #-------------------------
-
- model_name = "VGG-Face"; distance_metric = "cosine"
- if "model_name" in list(req.keys()):
- model_name = req["model_name"]
- if "distance_metric" in list(req.keys()):
- distance_metric = req["distance_metric"]
-
- instances = []
- if "img" in list(req.keys()):
- raw_content = req["img"] #list
-
- for item in raw_content: #item is in type of dict
- instance = []
- img1 = item["img1"]; img2 = item["img2"]
-
- validate_img1 = False
- if len(img1) > 11 and img1[0:11] == "data:image/":
- validate_img1 = True
-
- validate_img2 = False
- if len(img2) > 11 and img2[0:11] == "data:image/":
- validate_img2 = True
-
- if validate_img1 != True or validate_img2 != True:
- return jsonify({'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}), 205
-
- instance.append(img1); instance.append(img2)
- instances.append(instance)
-
- #--------------------------
-
- if len(instances) == 0:
- return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
-
- print("Input request of ", trx_id, " has ",len(instances)," pairs to verify")
-
- #--------------------------
- resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric)
-
- toc = time.time()
-
- resp_obj["trx_id"] = trx_id
- resp_obj["seconds"] = toc-tic
-
- #--------------------------
-
- return resp_obj, 200
-
-if __name__ == '__main__':
-
- app.run()
\ No newline at end of file
diff --git a/api/postman/deepface.postman_collection.json b/api_request/deepface.postman_collection.json
similarity index 100%
rename from api/postman/deepface.postman_collection.json
rename to api_request/deepface.postman_collection.json
diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index d873657..dfff493 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -9,6 +9,8 @@ from tqdm import tqdm
import json
import cv2
from keras import backend as K
+import keras
+import tensorflow as tf
#from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
#from extendedmodels import Age, Gender, Race, Emotion
@@ -18,14 +20,10 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst
-#TO-DO: pass built model optionally as input. I will get complex models up in rest api once and call these functions directly
-
def verify(img1_path, img2_path=''
- , model_name ='VGG-Face', distance_metric = 'cosine', plot = False):
+ , model_name ='VGG-Face', distance_metric = 'cosine', model = None):
tic = time.time()
-
- K.clear_session()
if type(img1_path) == list:
bulkProcess = True
@@ -36,28 +34,31 @@ def verify(img1_path, img2_path=''
#------------------------------
- if model_name == 'VGG-Face':
- print("Using VGG-Face model backend and", distance_metric,"distance.")
- model = VGGFace.loadModel()
- input_shape = (224, 224)
+ if model == None:
+ if model_name == 'VGG-Face':
+ print("Using VGG-Face model backend and", distance_metric,"distance.")
+ model = VGGFace.loadModel()
+
+ elif model_name == 'OpenFace':
+ print("Using OpenFace model backend", distance_metric,"distance.")
+ model = OpenFace.loadModel()
+
+ elif model_name == 'Facenet':
+ print("Using Facenet model backend", distance_metric,"distance.")
+ model = Facenet.loadModel()
+
+ elif model_name == 'DeepFace':
+ print("Using FB DeepFace model backend", distance_metric,"distance.")
+ model = FbDeepFace.loadModel()
+
+ else:
+ raise ValueError("Invalid model_name passed - ", model_name)
+ else: #model != None
+ print("Already built model is passed")
- elif model_name == 'OpenFace':
- print("Using OpenFace model backend", distance_metric,"distance.")
- model = OpenFace.loadModel()
- input_shape = (96, 96)
-
- elif model_name == 'Facenet':
- print("Using Facenet model backend", distance_metric,"distance.")
- model = Facenet.loadModel()
- input_shape = (160, 160)
-
- elif model_name == 'DeepFace':
- print("Using FB DeepFace model backend", distance_metric,"distance.")
- model = FbDeepFace.loadModel()
- input_shape = (152, 152)
-
- else:
- raise ValueError("Invalid model_name passed - ", model_name)
+ #------------------------------
+ #face recognition models have different size of inputs
+ input_shape = model.layers[0].input_shape[1:3]
#------------------------------
@@ -79,7 +80,7 @@ def verify(img1_path, img2_path=''
#----------------------
#find embeddings
-
+
img1_representation = model.predict(img1)[0,:]
img2_representation = model.predict(img2)[0,:]
@@ -133,8 +134,6 @@ def verify(img1_path, img2_path=''
#print("identification lasts ",toc-tic," seconds")
if bulkProcess == True:
- K.clear_session()
-
resp_obj = "{"
for i in range(0, len(resp_objects)):
@@ -151,8 +150,6 @@ def verify(img1_path, img2_path=''
def analyze(img_path, actions= []):
- K.clear_session()
-
if type(img_path) == list:
img_paths = img_path.copy()
bulkProcess = True
@@ -275,12 +272,9 @@ def analyze(img_path, actions= []):
if bulkProcess == True:
resp_objects.append(resp_obj)
else:
- K.clear_session()
return resp_obj
if bulkProcess == True:
- K.clear_session()
-
resp_obj = "{"
for i in range(0, len(resp_objects)):
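With the changes above, verify() accepts an already built Keras model through the new model argument and derives the expected input size from model.layers[0].input_shape instead of a hard-coded per-backend table, so callers can build a network once and reuse it across many comparisons. A minimal sketch of that calling pattern, assuming two placeholder image paths on disk:

    from deepface import DeepFace
    from deepface.basemodels import VGGFace

    model = VGGFace.loadModel()  # build the VGG-Face network once

    # both calls reuse the same weights instead of rebuilding the model
    result_a = DeepFace.verify("img1.jpg", "img2.jpg", model_name="VGG-Face", model=model)
    result_b = DeepFace.verify("img3.jpg", "img4.jpg", model_name="VGG-Face", model=model)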
diff --git a/deepface/app.py b/deepface/app.py
new file mode 100644
index 0000000..aa8f399
--- /dev/null
+++ b/deepface/app.py
@@ -0,0 +1,165 @@
+from flask import Flask, jsonify, request, make_response
+
+import uuid
+import json
+import time
+
+import tensorflow as tf
+
+from deepface import DeepFace
+from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
+
+#import DeepFace
+#from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
+
+#------------------------------
+
+app = Flask(__name__)
+
+tic = time.time()
+
+vggface_model = VGGFace.loadModel()
+print("VGG-Face model is built.")
+
+openface_model = OpenFace.loadModel()
+print("OpenFace model is built")
+
+facenet_model = Facenet.loadModel()
+print("FaceNet model is built")
+
+deepface_model = FbDeepFace.loadModel()
+print("DeepFace model is built")
+
+toc = time.time()
+
+print("Face recognition models are built in ", toc-tic," seconds")
+
+graph = tf.get_default_graph()
+
+#------------------------------
+#Service API Interface
+
+@app.route('/')
+def index():
+	return '<h1>Hello, world!</h1>'
+
+@app.route('/analyze', methods=['POST'])
+def analyze():
+
+ global graph
+
+ tic = time.time()
+ req = request.get_json()
+ trx_id = uuid.uuid4()
+
+ #---------------------------
+
+ resp_obj = jsonify({'success': False})
+ with graph.as_default():
+ instances = []
+ if "img" in list(req.keys()):
+ raw_content = req["img"] #list
+
+ for item in raw_content: #item is in type of dict
+ instances.append(item)
+
+ if len(instances) == 0:
+ return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
+
+ print("Analyzing ", len(instances)," instances")
+
+ #---------------------------
+
+ actions= ['emotion', 'age', 'gender', 'race']
+ if "actions" in list(req.keys()):
+ actions = req["actions"]
+
+ #---------------------------
+
+ resp_obj = DeepFace.analyze(instances, actions=actions)
+
+ #---------------------------
+
+ toc = time.time()
+
+ resp_obj["trx_id"] = trx_id
+ resp_obj["seconds"] = toc-tic
+
+ return resp_obj
+
+@app.route('/verify', methods=['POST'])
+
+def verify():
+
+ global graph
+
+ tic = time.time()
+ req = request.get_json()
+ trx_id = uuid.uuid4()
+
+ resp_obj = jsonify({'success': False})
+
+ with graph.as_default():
+
+ model_name = "VGG-Face"; distance_metric = "cosine"
+ if "model_name" in list(req.keys()):
+ model_name = req["model_name"]
+ if "distance_metric" in list(req.keys()):
+ distance_metric = req["distance_metric"]
+
+ #----------------------
+
+ instances = []
+ if "img" in list(req.keys()):
+ raw_content = req["img"] #list
+
+ for item in raw_content: #item is in type of dict
+ instance = []
+ img1 = item["img1"]; img2 = item["img2"]
+
+ validate_img1 = False
+ if len(img1) > 11 and img1[0:11] == "data:image/":
+ validate_img1 = True
+
+ validate_img2 = False
+ if len(img2) > 11 and img2[0:11] == "data:image/":
+ validate_img2 = True
+
+ if validate_img1 != True or validate_img2 != True:
+ return jsonify({'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}), 205
+
+ instance.append(img1); instance.append(img2)
+ instances.append(instance)
+
+ #--------------------------
+
+ if len(instances) == 0:
+ return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
+
+ print("Input request of ", trx_id, " has ",len(instances)," pairs to verify")
+
+ #--------------------------
+
+ if model_name == "VGG-Face":
+ resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = vggface_model)
+ elif model_name == "Facenet":
+ resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = facenet_model)
+ elif model_name == "OpenFace":
+ resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = openface_model)
+ elif model_name == "DeepFace":
+ resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = deepface_model)
+ else:
+ return jsonify({'success': False, 'error': 'You must pass a valid model name. Available models are VGG-Face, Facenet, OpenFace, DeepFace but you passed %s' % (model_name)}), 205
+
+ #--------------------------
+
+ toc = time.time()
+
+ resp_obj["trx_id"] = trx_id
+ resp_obj["seconds"] = toc-tic
+
+ return resp_obj, 200
+
+if __name__ == '__main__':
+
+ app.run()
\ No newline at end of file
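The new deepface/app.py builds all four recognition models once at startup and wraps request handling in graph.as_default() so the preloaded TensorFlow 1.x graph is visible from Flask's worker threads. A hedged client-side sketch for exercising the /verify endpoint, assuming the service runs on Flask's default port 5000 and using placeholder file names:

    import base64
    import requests

    def to_data_uri(path):
        # encode a local file as the "data:image/..." string the endpoint validates
        with open(path, "rb") as f:
            return "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

    payload = {
        "model_name": "VGG-Face",
        "distance_metric": "cosine",
        "img": [
            {"img1": to_data_uri("alice_1.jpg"), "img2": to_data_uri("alice_2.jpg")}
        ]
    }

    resp = requests.post("http://localhost:5000/verify", json=payload)
    print(resp.status_code, resp.json())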
diff --git a/deepface/basemodels/OpenFace.py b/deepface/basemodels/OpenFace.py
index 08eae7c..c2f3e71 100644
--- a/deepface/basemodels/OpenFace.py
+++ b/deepface/basemodels/OpenFace.py
@@ -32,7 +32,7 @@ def loadModel():
x = Conv2D(192, (3, 3), name='conv3')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
x = Activation('relu')(x)
- Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x)
+	x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x) #assign the output back to x so the LRN layer is actually applied
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
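The one-line OpenFace fix matters because, in the Keras functional API, calling a layer returns a new tensor and the layer only enters the graph if that return value is passed on; before this change the lrn_2 output was discarded, so the normalization was never applied. A standalone illustration of the pattern (using tensorflow.keras here; the layer sizes are arbitrary):

    from tensorflow.keras.layers import Input, Dense, Activation

    inputs = Input(shape=(8,))
    x = Dense(4)(inputs)
    Activation('relu')(x)      # return value discarded: this activation is not applied downstream
    x = Activation('relu')(x)  # return value reassigned: this one is wired into the graph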