api via gunicorn

This commit is contained in:
Sefik Ilkin Serengil 2023-02-01 18:38:00 +00:00
parent 9fed010762
commit 0d2b94679a
12 changed files with 266 additions and 517 deletions

View File

@ -437,7 +437,8 @@ disable=raw-checker-failed,
no-name-in-module,
unrecognized-option,
consider-using-dict-items,
consider-iterating-dictionary
consider-iterating-dictionary,
unexpected-keyword-arg
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option

View File

@ -39,5 +39,4 @@ ENV PYTHONUNBUFFERED=1
# -----------------------------------
# run the app (re-configure port if necessary)
EXPOSE 5000
# flask run is not recommended in production, move this to gunicorn
CMD ["python", "/app/api.py", "--port", "5000"]
CMD ["gunicorn", "--workers=1", "--timeout=3600", "--bind=0.0.0.0:5000", "app:create_app()"]

View File

@ -272,15 +272,16 @@ user
**API** - [`Demo`](https://youtu.be/HeKCQ6U9XmI)
Deepface serves an API as well. You can clone [`/api/api.py`](https://github.com/serengil/deepface/tree/master/api/api.py) and pass it to python command as an argument. This will get a rest service up. In this way, you can call deepface from an external system such as mobile app or web.
DeepFace serves an API as well. You can clone [`/api`](https://github.com/serengil/deepface/tree/master/api) folder and run the api via gunicorn server. This will get a rest service up. In this way, you can call deepface from an external system such as mobile app or web.
```
python api.py
```shell
cd scripts
./service.sh
```
<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-api.jpg" width="90%" height="90%"></p>
Face recognition, facial attribute analysis and vector representation functions are covered in the API. You are expected to call these functions as http post methods. Service endpoints will be `http://127.0.0.1:5000/verify` for face recognition, `http://127.0.0.1:5000/analyze` for facial attribute analysis, and `http://127.0.0.1:5000/represent` for vector representation. You should pass input images as base64 encoded string in this case. [Here](https://github.com/serengil/deepface/tree/master/api), you can find a postman project.
Face recognition, facial attribute analysis and vector representation functions are covered in the API. You are expected to call these functions as http post methods. Default service endpoints will be `http://localhost:5000/verify` for face recognition, `http://localhost:5000/analyze` for facial attribute analysis, and `http://localhost:5000/represent` for vector representation. You can pass input images as exact image paths on your environment, base64 encoded strings or images on web. [Here](https://github.com/serengil/deepface/tree/master/api), you can find a postman project to find out how these methods should be called.
**Dockerized Service**

View File

@ -1,308 +0,0 @@
import warnings

warnings.filterwarnings("ignore")

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from flask import Flask, jsonify, request, make_response
import argparse
import uuid
import json
import time
from tqdm import tqdm

import tensorflow as tf

tf_version = int(tf.__version__.split(".")[0])

# silence verbose tf2 logging
if tf_version == 2:
    import logging
    tf.get_logger().setLevel(logging.ERROR)

from deepface import DeepFace

app = Flask(__name__)

# tf1 requires an explicit default graph shared by the request handlers
if tf_version == 1:
    graph = tf.get_default_graph()
# Service API Interface
@app.route('/')
def index():
    """Liveness endpoint: confirms the service is up."""
    return '<h1>Hello, world!</h1>'
@app.route('/analyze', methods=['POST'])
def analyze():
    """Facial attribute analysis endpoint; delegates to analyzeWrapper."""
    global graph

    started_at = time.time()
    payload = request.get_json()
    trx_id = uuid.uuid4()

    # tf1 must execute inside the shared default graph
    if tf_version == 1:
        with graph.as_default():
            resp_obj = analyzeWrapper(payload, trx_id)
    elif tf_version == 2:
        resp_obj = analyzeWrapper(payload, trx_id)

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = time.time() - started_at

    return resp_obj, 200
def analyzeWrapper(req, trx_id = 0):
    """Run facial attribute analysis for every image in the request payload.

    Args:
        req: parsed JSON body. Expects "img": a list of images, plus optional
            "actions", "detector_backend", "align", "enforce_detection"
            overrides.
        trx_id: transaction identifier, used only for logging.

    Returns:
        A plain dict: {"demographies": {...}} on success, otherwise
        {'success': False, 'error': ...}. Never a Response/tuple, so the
        caller can safely attach extra keys (trx_id, seconds) afterwards.
    """
    instances = list(req.get("img", []))
    if len(instances) == 0:
        return {'success': False, 'error': 'you must pass at least one img object in your request'}

    print("Analyzing ", len(instances), " instances")

    # optional overrides with the historical defaults
    detector_backend = req.get("detector_backend", 'opencv')
    actions = req.get("actions", ['emotion', 'age', 'gender', 'race'])
    align = req.get("align", True)
    enforce_detection = req.get("enforce_detection", True)

    resp_obj = {"demographies": {}}
    try:
        for idx, instance in enumerate(instances):
            demographies = DeepFace.analyze(
                img_path=instance,
                detector_backend=detector_backend,
                actions=actions,
                align=align,
                enforce_detection=enforce_detection,
            )
            resp_obj["demographies"][f"img_{idx+1}"] = demographies
    except Exception as err:
        print("Exception: ", str(err))
        # BUGFIX: previously returned (jsonify(...), 205), which required an
        # app context and made the caller's resp_obj["trx_id"] write crash.
        return {'success': False, 'error': str(err)}

    return resp_obj
@app.route('/verify', methods=['POST'])
def verify():
    """Face verification endpoint; delegates to verifyWrapper."""
    global graph

    started_at = time.time()
    payload = request.get_json()
    trx_id = uuid.uuid4()

    resp_obj = jsonify({'success': False})  # fallback if no tf branch matches

    # tf1 must execute inside the shared default graph
    if tf_version == 1:
        with graph.as_default():
            resp_obj = verifyWrapper(payload, trx_id)
    elif tf_version == 2:
        resp_obj = verifyWrapper(payload, trx_id)

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = time.time() - started_at

    return resp_obj, 200
def verifyWrapper(req, trx_id = 0):
    """Verify every (img1, img2) pair supplied in the request payload.

    Args:
        req: parsed JSON body. Expects "img": a list of {"img1", "img2"}
            dicts holding base64-encoded images, plus optional "model_name",
            "distance_metric", "detector_backend", "align",
            "enforce_detection" overrides.
        trx_id: transaction identifier, used only for logging.

    Returns:
        {"pairs": [...]} on success, otherwise a plain
        {'success': False, 'error': ...} dict. Never a Response/tuple, so
        the caller can safely attach trx_id/seconds keys afterwards.
    """
    model_name = req.get("model_name", "VGG-Face")
    distance_metric = req.get("distance_metric", "cosine")
    detector_backend = req.get("detector_backend", "opencv")
    align = req.get("align", True)
    enforce_detection = req.get("enforce_detection", True)

    raw_content = req.get("img", [])
    if len(raw_content) == 0:
        # BUGFIX: a missing "img" key previously fell through and returned an
        # empty dict with no error; report it explicitly like the empty list.
        return {'success': False, 'error': 'you must pass at least one img object in your request'}

    print("Input request of ", trx_id, " has ", len(raw_content), " pairs to verify")

    def _is_base64_image(value):
        # same rule as before: strictly longer than the "data:image/" prefix
        return len(value) > 11 and value[0:11] == "data:image/"

    try:
        results = []
        for item in raw_content:  # item is a dict with img1/img2 keys
            img1 = item["img1"]
            img2 = item["img2"]
            if not (_is_base64_image(img1) and _is_base64_image(img2)):
                return {'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}
            results.append(
                DeepFace.verify(
                    img1_path=img1,
                    img2_path=img2,
                    model_name=model_name,
                    detector_backend=detector_backend,
                    distance_metric=distance_metric,
                    align=align,
                    enforce_detection=enforce_detection,
                )
            )
        return {"pairs": results}
    except Exception as err:
        # BUGFIX: previously assigned (jsonify(...), 205), which required an
        # app context and made the caller's resp_obj["trx_id"] write crash.
        return {'success': False, 'error': str(err)}
@app.route('/represent', methods=['POST'])
def represent():
    """Vector representation endpoint; delegates to representWrapper."""
    global graph

    started_at = time.time()
    payload = request.get_json()
    trx_id = uuid.uuid4()

    resp_obj = jsonify({'success': False})  # fallback if no tf branch matches

    # tf1 must execute inside the shared default graph
    if tf_version == 1:
        with graph.as_default():
            resp_obj = representWrapper(payload, trx_id)
    elif tf_version == 2:
        resp_obj = representWrapper(payload, trx_id)

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = time.time() - started_at

    return resp_obj, 200
def representWrapper(req, trx_id = 0):
    """Compute facial embeddings for the image in the request payload.

    Args:
        req: parsed JSON body. Expects "img": a base64-encoded image string,
            plus optional "model_name" and "detector_backend" overrides.
        trx_id: transaction identifier, used only for logging.

    Returns:
        {"embeddings": [...]} on success, otherwise a plain
        {'success': False, 'error': ...} dict. Never a Response/tuple, so
        the caller can safely attach trx_id/seconds keys afterwards.
    """
    model_name = req.get("model_name", "VGG-Face")
    detector_backend = req.get("detector_backend", 'opencv')

    img = req.get("img", "")
    # input must be strictly longer than the "data:image/" prefix
    if not (len(img) > 11 and img[0:11] == "data:image/"):
        print("invalid image passed!")
        return {'success': False, 'error': 'you must pass img as base64 encoded string'}

    try:
        embedding_objs = DeepFace.represent(
            img,
            model_name=model_name,
            detector_backend=detector_backend,
        )
    except Exception as err:
        print("Exception: ", str(err))
        # BUGFIX: previously execution fell through after the except branch
        # and hit a NameError on embedding_objs; return the error instead.
        return {'success': False, 'error': str(err)}

    faces = [
        {
            "embedding": obj["embedding"],
            "facial_area": obj["facial_area"],
            "model_name": model_name,
            "detector_backend": detector_backend,
        }
        for obj in embedding_objs
    ]
    return {"embeddings": faces}
if __name__ == '__main__':
    # allow overriding the serving port, e.g. `python api.py --port 8080`
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-p', '--port',
        type=int,
        default=5000,
        help='Port of serving api',
    )
    cli_args = arg_parser.parse_args()
    app.run(host='0.0.0.0', port=cli_args.port)

9
api/app.py Normal file
View File

@ -0,0 +1,9 @@
# 3rd party dependencies
from flask import Flask
from routes import blueprint
def create_app():
    """Application factory: build the Flask app and attach the API routes."""
    application = Flask(__name__)
    application.register_blueprint(blueprint)
    return application

View File

@ -0,0 +1,102 @@
{
"info": {
"_postman_id": "4c0b144e-4294-4bdd-8072-bcb326b1fed2",
"name": "deepface-api",
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
},
"item": [
{
"name": "Represent",
"request": {
"method": "POST",
"header": [],
"body": {
"mode": "raw",
"raw": "{\n \"model_name\": \"Facenet\",\n \"img\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\"\n}",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "http://127.0.0.1:5000/represent",
"protocol": "http",
"host": [
"127",
"0",
"0",
"1"
],
"port": "5000",
"path": [
"represent"
]
}
},
"response": []
},
{
"name": "Face verification",
"request": {
"method": "POST",
"header": [],
"body": {
"mode": "raw",
"raw": " {\n \t\"img1_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\",\n \"img2_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img2.jpg\",\n \"model_name\": \"Facenet\",\n \"detector_backend\": \"mtcnn\",\n \"distance_metric\": \"euclidean\"\n }",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "http://127.0.0.1:5000/verify",
"protocol": "http",
"host": [
"127",
"0",
"0",
"1"
],
"port": "5000",
"path": [
"verify"
]
}
},
"response": []
},
{
"name": "Face analysis",
"request": {
"method": "POST",
"header": [],
"body": {
"mode": "raw",
"raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \"actions\": [\"age\", \"gender\", \"emotion\", \"race\"]\n}",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "http://127.0.0.1:5000/analyze",
"protocol": "http",
"host": [
"127",
"0",
"0",
"1"
],
"port": "5000",
"path": [
"analyze"
]
}
},
"response": []
}
]
}

File diff suppressed because one or more lines are too long

100
api/routes.py Normal file
View File

@ -0,0 +1,100 @@
from flask import Blueprint, request
import service
blueprint = Blueprint("routes", __name__)


@blueprint.route("/")
def home():
    """Landing route used as a simple liveness check."""
    return "<h1>Welcome to DeepFace API!</h1>"
@blueprint.route("/represent", methods=["POST"])
def represent():
    """Vector representation endpoint.

    Reads a JSON body with "img" (image path, URL or base64 string) and
    optional "model_name", "detector_backend", "enforce_detection", "align"
    overrides, then returns the embeddings from the service layer.
    """
    input_args = request.get_json()
    if input_args is None:
        return {"message": "empty input set passed"}

    # accept "img_path" as an alias for "img", mirroring the /analyze route
    img_path = input_args.get("img", input_args.get("img_path"))
    if img_path is None:
        # BUGFIX: the message previously said "img_path" while only the
        # "img" key was actually read
        return {"message": "you must pass img input"}

    obj = service.represent(
        img_path=img_path,
        model_name=input_args.get("model_name", "VGG-Face"),
        detector_backend=input_args.get("detector_backend", "opencv"),
        enforce_detection=input_args.get("enforce_detection", True),
        align=input_args.get("align", True),
    )
    return obj
@blueprint.route("/verify", methods=["POST"])
def verify():
    """Face verification endpoint comparing img1_path against img2_path."""
    input_args = request.get_json()
    if input_args is None:
        return {"message": "empty input set passed"}

    img1_path = input_args.get("img1_path")
    img2_path = input_args.get("img2_path")
    if img1_path is None:
        return {"message": "you must pass img1_path input"}
    if img2_path is None:
        return {"message": "you must pass img2_path input"}

    verification = service.verify(
        img1_path=img1_path,
        img2_path=img2_path,
        model_name=input_args.get("model_name", "VGG-Face"),
        detector_backend=input_args.get("detector_backend", "opencv"),
        distance_metric=input_args.get("distance_metric", "cosine"),
        align=input_args.get("align", True),
        enforce_detection=input_args.get("enforce_detection", True),
    )

    # expose the verified flag as a string in the JSON response
    verification["verified"] = str(verification["verified"])
    return verification
@blueprint.route("/analyze", methods=["POST"])
def analyze():
    """Facial attribute analysis endpoint (age, gender, emotion, race)."""
    input_args = request.get_json()
    if input_args is None:
        return {"message": "empty input set passed"}

    img_path = input_args.get("img_path")
    if img_path is None:
        return {"message": "you must pass img_path input"}

    demographies = service.analyze(
        img_path=img_path,
        actions=input_args.get("actions", ["age", "gender", "emotion", "race"]),
        detector_backend=input_args.get("detector_backend", "opencv"),
        enforce_detection=input_args.get("enforce_detection", True),
        align=input_args.get("align", True),
    )
    return demographies

42
api/service.py Normal file
View File

@ -0,0 +1,42 @@
from deepface import DeepFace
def represent(img_path, model_name, detector_backend, enforce_detection, align):
    """Wrap DeepFace.represent and package its embeddings under "results"."""
    embedding_objs = DeepFace.represent(
        img_path=img_path,
        model_name=model_name,
        detector_backend=detector_backend,
        enforce_detection=enforce_detection,
        align=align,
    )
    return {"results": embedding_objs}
def verify(
    img1_path, img2_path, model_name, detector_backend, distance_metric, enforce_detection, align
):
    """Wrap DeepFace.verify and return its result dict unchanged."""
    return DeepFace.verify(
        img1_path=img1_path,
        img2_path=img2_path,
        model_name=model_name,
        detector_backend=detector_backend,
        distance_metric=distance_metric,
        align=align,
        enforce_detection=enforce_detection,
    )
def analyze(img_path, actions, detector_backend, enforce_detection, align):
    """Wrap DeepFace.analyze and package its demographies under "results"."""
    demographies = DeepFace.analyze(
        img_path=img_path,
        actions=actions,
        detector_backend=detector_backend,
        enforce_detection=enforce_detection,
        align=align,
    )
    return {"results": demographies}

View File

@ -14,3 +14,4 @@ dlib>=19.20.0
retina-face>=0.0.1
mediapipe>=0.8.7.3
fire>=0.4.0
gunicorn>=20.9.1

3
scripts/service.sh Normal file
View File

@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Serve the DeepFace API with gunicorn from the api/ directory.
# BUGFIX: `cd ../api` only worked when invoked from scripts/; resolve the
# path relative to this script instead, and abort if the directory is missing.
cd "$(dirname "$0")/../api" || exit 1
gunicorn --workers=1 --timeout=3600 --bind=0.0.0.0:5000 "app:create_app()"

View File

@ -23,5 +23,5 @@ setuptools.setup(
["deepface = deepface.DeepFace:cli"],
},
python_requires='>=3.5.5',
install_requires=["numpy>=1.14.0", "pandas>=0.23.4", "tqdm>=4.30.0", "gdown>=3.10.1", "Pillow>=5.2.0", "opencv-python>=4.5.5.64", "tensorflow>=1.9.0", "keras>=2.2.0", "Flask>=1.1.2", "mtcnn>=0.1.0", "retina-face>=0.0.1", "fire>=0.4.0"]
install_requires=["numpy>=1.14.0", "pandas>=0.23.4", "tqdm>=4.30.0", "gdown>=3.10.1", "Pillow>=5.2.0", "opencv-python>=4.5.5.64", "tensorflow>=1.9.0", "keras>=2.2.0", "Flask>=1.1.2", "mtcnn>=0.1.0", "retina-face>=0.0.1", "fire>=0.4.0", "gunicorn>=20.9.1"]
)