api support init

Şefik Serangil 2020-04-09 12:04:51 +03:00
parent 54c1c28b35
commit 8427be05c2
5 changed files with 324 additions and 26 deletions

api/DeepFaceApi.py

@@ -0,0 +1,120 @@
from flask import Flask, jsonify, request, make_response

import uuid
import json
import time

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

#------------------------------

app = Flask(__name__)

#------------------------------
#Service API Interface

@app.route('/analyze', methods=['POST'])
def analyze():

    req = request.get_json()
    trx_id = uuid.uuid4()

    #---------------------------

    tic = time.time()

    instances = []
    if "img" in list(req.keys()):
        raw_content = req["img"] #list
        for item in raw_content: #each item is an image (base64 encoded string)
            instances.append(item)

    if len(instances) == 0:
        return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205

    print("Analyzing ", len(instances), " instances")

    #---------------------------

    actions = ['emotion', 'age', 'gender', 'race']
    if "actions" in list(req.keys()):
        actions = req["actions"]

    #---------------------------

    resp_obj = DeepFace.analyze(instances, actions=actions)

    #---------------------------
    #resp_obj = json.loads("{\"success\": true}")

    toc = time.time()

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = toc-tic

    return resp_obj

@app.route('/verify', methods=['POST'])
def verify():

    req = request.get_json()
    trx_id = uuid.uuid4()

    tic = time.time()

    #-------------------------

    model_name = "VGG-Face"; distance_metric = "cosine"
    if "model_name" in list(req.keys()):
        model_name = req["model_name"]
    if "distance_metric" in list(req.keys()):
        distance_metric = req["distance_metric"]

    instances = []
    if "img" in list(req.keys()):
        raw_content = req["img"] #list
        for item in raw_content: #each item is a dict with img1 and img2 keys
            instance = []
            img1 = item["img1"]; img2 = item["img2"]

            validate_img1 = False
            if len(img1) > 11 and img1[0:11] == "data:image/":
                validate_img1 = True

            validate_img2 = False
            if len(img2) > 11 and img2[0:11] == "data:image/":
                validate_img2 = True

            if validate_img1 != True or validate_img2 != True:
                return jsonify({'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}), 205

            instance.append(img1); instance.append(img2)
            instances.append(instance)

    #--------------------------

    if len(instances) == 0:
        return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205

    print("Input request of ", trx_id, " has ", len(instances), " pairs to verify")

    #--------------------------

    resp_obj = DeepFace.verify(instances, model_name=model_name, distance_metric=distance_metric)

    toc = time.time()

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = toc-tic

    #--------------------------

    return resp_obj, 200

if __name__ == '__main__':
    app.run()
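A minimal client sketch for these endpoints (not part of the commit): it assumes Flask's default host and port (http://localhost:5000) and uses placeholder image file names. Note that the validation above keys on the "data:image/" prefix, so images must be sent as data URIs rather than raw base64.

import base64
import requests

def to_data_uri(path):
    #prefix the payload with "data:image/..." so the API's validation accepts it
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return "data:image/jpeg;base64," + encoded

resp = requests.post("http://localhost:5000/verify", json={
    "model_name": "VGG-Face",
    "distance_metric": "cosine",
    "img": [
        {"img1": to_data_uri("img1.jpg"), "img2": to_data_uri("img2.jpg")}
    ]
})
print(resp.json()) #expected keys: pair_1, trx_id, seconds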

File diff suppressed because one or more lines are too long

deepface/DeepFace.py

@@ -9,6 +9,7 @@ import pandas as pd
from tqdm import tqdm
import json
import cv2
from keras import backend as K

#from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
#from extendedmodels import Age, Gender, Race, Emotion
@@ -18,10 +19,14 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst

#TO-DO: pass a built model optionally as input. The REST API can then build the heavy models once and call these functions directly.

def verify(img1_path, img2_path='', model_name='VGG-Face', distance_metric='cosine', plot=False):

    tic = time.time()

    K.clear_session()

    if type(img1_path) == list:
        bulkProcess = True
@@ -54,7 +59,7 @@ def verify(img1_path, img2_path=''
    else:
        raise ValueError("Invalid model_name passed - ", model_name)

    #------------------------------
    #tuned thresholds for model and metric pair
@@ -67,14 +72,6 @@ def verify(img1_path, img2_path=''
        img1_path = instance[0]
        img2_path = instance[1]

        #----------------------

        if os.path.isfile(img1_path) != True:
            raise ValueError("Confirm that ", img1_path, " exists")

        if os.path.isfile(img2_path) != True:
            raise ValueError("Confirm that ", img2_path, " exists")

        #----------------------
        #crop and align faces
@@ -83,7 +80,7 @@ def verify(img1_path, img2_path=''
        #----------------------
        #find embeddings

        img1_representation = model.predict(img1)[0,:]
        img2_representation = model.predict(img2)[0,:]
@@ -140,6 +137,7 @@ def verify(img1_path, img2_path=''
        if bulkProcess == True:
            resp_objects.append(resp_obj)
        else:
            K.clear_session()
            return resp_obj

        #----------------------
@@ -153,9 +151,25 @@ def verify(img1_path, img2_path=''
    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        return resp_objects

        K.clear_session()

        resp_obj = "{"
        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])
            if i > 0:
                resp_obj += ", "
            resp_obj += "\"pair_"+str(i+1)+"\": "+resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
        #return resp_objects

def analyze(img_path, actions= []):

    K.clear_session()

    if type(img_path) == list:
        img_paths = img_path.copy()
@@ -190,12 +204,6 @@ def analyze(img_path, actions= []):
    resp_objects = []

    for img_path in img_paths:

        if type(img_path) != str:
            raise ValueError("You should pass string data type for image paths but you passed ", type(img_path))

        if os.path.isfile(img_path) != True:
            raise ValueError("Confirm that ", img_path, " exists")

        resp_obj = "{"

        #TO-DO: do this in parallel
@@ -285,10 +293,25 @@ def analyze(img_path, actions= []):
        if bulkProcess == True:
            resp_objects.append(resp_obj)
        else:
            K.clear_session()
            return resp_obj

    if bulkProcess == True:
        return resp_objects

        K.clear_session()

        resp_obj = "{"
        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])
            if i > 0:
                resp_obj += ", "
            resp_obj += "\"instance_"+str(i+1)+"\": "+resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
        #return resp_objects

def detectFace(img_path):
    img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
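The bulk path above serialises each result with json.dumps, concatenates the pieces into one string, and parses that back with json.loads. Since every resp_obj is already a plain dict at that point, the same response shape can also be built directly; a sketch, not part of the commit:

resp_obj = {"pair_"+str(i+1): resp_objects[i] for i in range(0, len(resp_objects))}

analyze follows the same pattern with instance_ keys instead of pair_ keys.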

deepface/commons/functions.py

@@ -12,6 +12,13 @@ import hashlib
import math
from PIL import Image
import copy
import base64

def loadBase64Img(uri):
    encoded_data = uri.split(',')[1] #drop the "data:image/...;base64," prefix
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8) #decode the payload into a uint8 buffer
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img

def distance(a, b):
    x1 = a[0]; y1 = a[1]
@@ -133,6 +140,10 @@ def detectFace(img, target_size=(224, 224), grayscale = False):
    if type(img).__module__ == np.__name__:
        exact_image = True

    base64_img = False
    if len(img) > 11 and img[0:11] == "data:image/":
        base64_img = True

    #-----------------------

    opencv_path = get_opencv_path()
@@ -147,9 +158,16 @@ def detectFace(img, target_size=(224, 224), grayscale = False):
    face_detector = cv2.CascadeClassifier(face_detector_path)
    eye_detector = cv2.CascadeClassifier(eye_detector_path)

    if exact_image != True: #image path passed as input

    if base64_img == True:
        img = loadBase64Img(img)

    elif exact_image != True: #image path passed as input
        if os.path.isfile(img) != True:
            raise ValueError("Confirm that ", img, " exists")
        img = cv2.imread(img)

    img_raw = img.copy()

    #--------------------------------
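With the change above, detectFace accepts three input forms: an already-loaded numpy array, a base64 data URI, or a file path. A quick sketch, using a placeholder file name:

import base64
import cv2
from deepface.commons import functions

face = functions.detectFace("my_face.jpg") #1) a file path

face = functions.detectFace(cv2.imread("my_face.jpg")) #2) an exact image as a numpy array

with open("my_face.jpg", "rb") as f: #3) a base64 data URI, as the REST API sends
    uri = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode("utf-8")
face = functions.detectFace(uri)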

tests/unit_tests.py

@@ -17,8 +17,8 @@ dataset = [
]
resp_obj = DeepFace.verify(dataset)
print(resp_obj[0]["verified"] == True)
print(resp_obj[1]["verified"] == True)
print(resp_obj["pair_1"]["verified"] == True)
print(resp_obj["pair_2"]["verified"] == True)
print("-----------------------------------------")
@@ -32,10 +32,10 @@ dataset = [
]
resp_obj = DeepFace.analyze(dataset)
print(resp_obj[0]["age"]," years old ", resp_obj[0]["dominant_emotion"], " ",resp_obj[0]["gender"])
print(resp_obj[1]["age"]," years old ", resp_obj[1]["dominant_emotion"], " ",resp_obj[1]["gender"])
print(resp_obj[2]["age"]," years old ", resp_obj[2]["dominant_emotion"], " ",resp_obj[2]["gender"])
print(resp_obj[3]["age"]," years old ", resp_obj[3]["dominant_emotion"], " ",resp_obj[3]["gender"])
print(resp_obj["instance_1"]["age"]," years old ", resp_obj["instance_1"]["dominant_emotion"], " ",resp_obj["instance_1"]["gender"])
print(resp_obj["instance_2"]["age"]," years old ", resp_obj["instance_2"]["dominant_emotion"], " ",resp_obj["instance_2"]["gender"])
print(resp_obj["instance_3"]["age"]," years old ", resp_obj["instance_3"]["dominant_emotion"], " ",resp_obj["instance_3"]["gender"])
print(resp_obj["instance_4"]["age"]," years old ", resp_obj["instance_4"]["dominant_emotion"], " ",resp_obj["instance_4"]["gender"])
print("-----------------------------------------")