mirror of
https://github.com/serengil/deepface.git
synced 2025-06-06 19:45:21 +00:00
new interface
This commit is contained in:
parent 9c6434a145
commit dec0c34b2d

README.md: 43 changed lines
@@ -46,7 +46,7 @@ A modern [**face recognition pipeline**](https://sefiks.com/2020/05/01/a-gentle-

**Face Verification** - [`Demo`](https://youtu.be/KRCvkNCOphE)

-This function verifies face pairs as same person or different persons. It expects exact image paths as inputs. Passing numpy or base64 encoded images is also welcome. Then, it is going to return a dictionary and you should check just its verified key.
+This function verifies face pairs as the same person or different persons. It expects exact image paths as inputs. Passing numpy or base64 encoded images is also welcome. It returns a dictionary, and you should check just its verified key. The verification function can also handle many faces in the face pairs; in that case, the most similar faces are compared.

```python
result = DeepFace.verify(img1_path = "img1.jpg", img2_path = "img2.jpg")
```
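A minimal sketch of consuming that dictionary; the extra key names (distance, threshold) follow the unit tests later in this same commit:

```python
from deepface import DeepFace

result = DeepFace.verify(img1_path = "img1.jpg", img2_path = "img2.jpg")

# "verified" carries the decision itself
if result["verified"]:
    print("same person")
else:
    print("different persons")

# distance vs. threshold explains how the decision was made
print("distance:", result["distance"], "threshold:", result["threshold"])
```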
@@ -56,26 +56,27 @@ result = DeepFace.verify(img1_path = "img1.jpg", img2_path = "img2.jpg")

**Face recognition** - [`Demo`](https://youtu.be/Hrjp-EStM_s)

-[Face recognition](https://sefiks.com/2020/05/25/large-scale-face-recognition-for-deep-learning/) requires applying face verification many times. Herein, deepface has an out-of-the-box find function to handle this action. It's going to look for the identity of input image in the database path and it will return pandas data frame as output.
+[Face recognition](https://sefiks.com/2020/05/25/large-scale-face-recognition-for-deep-learning/) requires applying face verification many times. Herein, deepface has an out-of-the-box find function to handle this action. It looks for the identity of the input image in the database path and returns a list of pandas data frames as output, with one entry per face appearing in the input image.

```python
-df = DeepFace.find(img_path = "img1.jpg", db_path = "C:/workspace/my_db")
+dfs = DeepFace.find(img_path = "img1.jpg", db_path = "C:/workspace/my_db")
```

<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-6-v2.jpg" width="95%" height="95%"></p>
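Since find now returns one data frame per detected face, iterating the list is the natural way to consume it; a short sketch mirroring the unit tests later in this commit:

```python
dfs = DeepFace.find(img_path = "img1.jpg", db_path = "C:/workspace/my_db")

# one data frame per face detected in img1.jpg;
# each row is a candidate identity from the database path
for df in dfs:
    print(df.head())
```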
**Embeddings**

-Face recognition models basically represent facial images as multi-dimensional vectors. Sometimes, you need those embedding vectors directly. DeepFace comes with a dedicated representation function.
+Face recognition models basically represent facial images as multi-dimensional vectors. Sometimes, you need those embedding vectors directly. DeepFace comes with a dedicated represent function, which returns a list of embedding objects, with one entry per face appearing in the input image.

```python
-embedding = DeepFace.represent(img_path = "img.jpg")
+embedding_objs = DeepFace.represent(img_path = "img.jpg")
```

-This function returns an array as output. The size of the output array would be different based on the model name. For instance, VGG-Face is the default model for deepface and it represents facial images as 2622 dimensional vectors.
+Each returned embedding is a plain list of floats whose size depends on the model name. For instance, VGG-Face is the default model and it represents facial images as 2622 dimensional vectors.

```python
embedding = embedding_objs[0]["embedding"]
assert isinstance(embedding, list)
# VGG-Face is the default model here, so the embedding has 2622 dimensions
assert len(embedding) == 2622
```
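To illustrate what the raw vectors are for: verification ultimately reduces to a distance between two such embeddings. A hedged sketch of cosine distance with numpy, shown for illustration only (this is the textbook formula, not deepface's internal code):

```python
import numpy as np
from deepface import DeepFace

def cosine_distance(a, b):
    # 1 - cosine similarity between the two embedding vectors
    a, b = np.array(a), np.array(b)
    return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

source = DeepFace.represent(img_path = "img1.jpg")[0]["embedding"]
target = DeepFace.represent(img_path = "img2.jpg")[0]["embedding"]
print(cosine_distance(source, target))
```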
@@ -104,18 +105,18 @@ models = [

```python
#face verification
result = DeepFace.verify(img1_path = "img1.jpg",
  img2_path = "img2.jpg",
-  model_name = models[1]
+  model_name = models[0]
)

#face recognition
-df = DeepFace.find(img_path = "img1.jpg",
+dfs = DeepFace.find(img_path = "img1.jpg",
  db_path = "C:/workspace/my_db",
  model_name = models[1]
)

#embeddings
-embedding = DeepFace.represent(img_path = "img.jpg",
-  model_name = models[1]
+embedding_objs = DeepFace.represent(img_path = "img.jpg",
+  model_name = models[2]
)
```
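The hunk begins just below the models list itself. Based on the model_name values documented in the represent docstring later in this diff, it presumably looks something like the following; the exact contents and order are an assumption, not shown in the hunk:

```python
# assumed list; names taken from the model_name docs in this commit, order unknown
models = ['VGG-Face', 'Facenet', 'Facenet512', 'OpenFace', 'DeepFace', 'DeepID', 'Dlib', 'ArcFace', 'SFace']
```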
@@ -151,9 +152,9 @@ result = DeepFace.verify(img1_path = "img1.jpg",

```python
)

#face recognition
-df = DeepFace.find(img_path = "img1.jpg",
+dfs = DeepFace.find(img_path = "img1.jpg",
  db_path = "C:/workspace/my_db",
-  distance_metric = metrics[1]
+  distance_metric = metrics[2]
)
```
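For reference, the metrics list is spelled out in the unit tests of this same commit:

```python
metrics = ['cosine', 'euclidean', 'euclidean_l2']
```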
@@ -164,7 +165,7 @@ Euclidean L2 form [seems](https://youtu.be/i_MOwvhbLdI) to be more stable than c

Deepface also comes with a strong facial attribute analysis module including [`age`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`gender`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`facial expression`](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) (including angry, fear, neutral, sad, disgust, happy and surprise) and [`race`](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/) (including asian, white, middle eastern, indian, latino and black) predictions.

```python
-obj = DeepFace.analyze(img_path = "img4.jpg",
+objs = DeepFace.analyze(img_path = "img4.jpg",
  actions = ['age', 'gender', 'race', 'emotion']
)
```
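analyze likewise returns a list with one entry per detected face. A sketch of reading the dominant attributes, with key names taken from the unit tests in this commit:

```python
objs = DeepFace.analyze(img_path = "img4.jpg",
  actions = ['age', 'gender', 'race', 'emotion']
)

for obj in objs:
    print("Age: ", obj["age"])
    print("Gender: ", obj["dominant_gender"])
    print("Race: ", obj["dominant_race"])
    print("Emotion: ", obj["dominant_emotion"])
```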
@@ -195,27 +196,27 @@ backends = [

```python
#face verification
obj = DeepFace.verify(img1_path = "img1.jpg",
  img2_path = "img2.jpg",
-  detector_backend = backends[4]
+  detector_backend = backends[0]
)

#face recognition
-df = DeepFace.find(img_path = "img.jpg",
+dfs = DeepFace.find(img_path = "img.jpg",
  db_path = "my_db",
-  detector_backend = backends[4]
+  detector_backend = backends[1]
)

#embeddings
-embedding = DeepFace.represent(img_path = "img.jpg",
-  detector_backend = backends[4]
+embedding_objs = DeepFace.represent(img_path = "img.jpg",
+  detector_backend = backends[2]
)

#facial analysis
-demography = DeepFace.analyze(img_path = "img4.jpg",
-  detector_backend = backends[4]
+demographies = DeepFace.analyze(img_path = "img4.jpg",
+  detector_backend = backends[3]
)

#face detection and alignment
-face = DeepFace.detectFace(img_path = "img.jpg",
+face_objs = DeepFace.extract_faces(img_path = "img.jpg",
  target_size = (224, 224),
  detector_backend = backends[4]
)
```
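The backends list itself sits above the hunk. A plausible definition, assuming the detector names documented in the analyze docstring below (the order is a guess), followed by a sketch of consuming face_objs, whose key names come from this commit's unit tests:

```python
# assumed list; detector names from the docstring below, order unknown
backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe']

face_objs = DeepFace.extract_faces(img_path = "img.jpg",
  target_size = (224, 224),
  detector_backend = backends[4]
)

# each item carries the aligned face crop plus where it was found
for face_obj in face_objs:
    face = face_obj["face"]           # face crop as a numpy array
    region = face_obj["facial_area"]  # dict with x, y, w, h
    print(region, face_obj["confidence"])
```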
@@ -210,37 +210,15 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race'), enforce_d

This function analyzes facial attributes including age, gender, emotion and race. In the background, the analysis function builds convolutional neural network models to classify the age, gender, emotion and race of the input image.

Parameters:
-<<<<<<< HEAD
    img_path: exact image path, numpy array (BGR) or base64 encoded image could be passed.

    actions (tuple): The default is ('age', 'gender', 'emotion', 'race'). You can drop some of those attributes.

-=======
-    img_path (string): exact image path. Alternatively, numpy array (BGR) or base64 encoded image could be passed. If you are going to analyze lots of images, then set this to a list, e.g. img_path = ['img1.jpg', 'img2.jpg']
-
-    actions (tuple): The default is ('age', 'gender', 'emotion', 'race'). You can drop some of those attributes.
-
-    models: facial attribute analysis models are built in every call of the analyze function. You can pass pre-built models with this argument.
-
-        models = {}
-        models['age'] = DeepFace.build_model('Age')
-        models['gender'] = DeepFace.build_model('Gender')
-        models['emotion'] = DeepFace.build_model('Emotion')
-        models['race'] = DeepFace.build_model('Race')
-
->>>>>>> 658e2f987edc8c054e8cbba580025bcebbf17dc3
    enforce_detection (boolean): The function throws an exception if no face is detected by default. Set this to False if you don't want to get an exception. This might be convenient for low resolution images.

    detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe.

-<<<<<<< HEAD
    silent (boolean): disable (some) log messages
-=======
-    prog_bar (boolean): enable/disable a progress bar
-
-Returns:
-    The function returns a dictionary. If img_path is a list, then it will return a list of dictionaries.
->>>>>>> 658e2f987edc8c054e8cbba580025bcebbf17dc3

Returns:
    The function returns a list of dictionaries for each face appearing in the image.
@@ -540,15 +518,6 @@ def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection

Parameters:
    img_path (string): exact image path. Alternatively, numpy array (BGR) or base64 encoded images could be passed.

-<<<<<<< HEAD
-=======
-    model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
-
-    model: Built deepface model. A face recognition model is built in every call of the verify function. You can optionally pass a pre-built face recognition model if you will call the verify function several times. Consider passing a model if you are going to call the represent function in a for loop.
-
-        model = DeepFace.build_model('VGG-Face')
-
->>>>>>> 658e2f987edc8c054e8cbba580025bcebbf17dc3
    enforce_detection (boolean): If any face could not be detected in an image, then the verify function throws an exception. Set this to False to avoid the exception. This might be convenient for low resolution images.

    detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
@@ -2,6 +2,7 @@ import warnings
import os
import tensorflow as tf
import numpy as np
+import pandas as pd
import cv2
from deepface import DeepFace
@@ -35,7 +36,7 @@ def evaluate(condition):

# ------------------------------------------------

detectors = ['opencv', 'mtcnn']
-models = ['VGG-Face', 'Facenet', 'Facenet512', 'ArcFace', 'SFace']
+models = ['VGG-Face', 'Facenet', 'ArcFace']
metrics = ['cosine', 'euclidean', 'euclidean_l2']

dataset = [
@@ -56,82 +57,179 @@ print("-----------------------------------------")

def test_cases():

-    print("DeepFace.detectFace test")
+    print("Enforce detection test")
    black_img = np.zeros([224, 224, 3])

    # enforce detection on for represent
    try:
        DeepFace.represent(img_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------

    # enforce detection off for represent
    try:
        objs = DeepFace.represent(img_path=black_img, enforce_detection=False)
        exception_thrown = False

        # validate response of represent function
        assert isinstance(objs, list)
        assert len(objs) > 0
        assert isinstance(objs[0], dict)
        assert "embedding" in objs[0].keys()
        assert "facial_area" in objs[0].keys()
        assert isinstance(objs[0]["facial_area"], dict)
        assert "x" in objs[0]["facial_area"].keys()
        assert "y" in objs[0]["facial_area"].keys()
        assert "w" in objs[0]["facial_area"].keys()
        assert "h" in objs[0]["facial_area"].keys()
        assert isinstance(objs[0]["embedding"], list)
        assert len(objs[0]["embedding"]) == 2622 # embedding of VGG-Face
    except:
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------
    # enforce detection on for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------
    # enforce detection off for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
        assert isinstance(obj, dict)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------

    print("-----------------------------------------")
print("Extract faces test")
|
||||
|
||||
for detector in detectors:
|
||||
img = DeepFace.detectFace("dataset/img11.jpg", detector_backend = detector)
|
||||
evaluate(img.shape[0] > 0 and img.shape[1] > 0)
|
||||
print(detector," test is done")
|
||||
img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend = detector)
|
||||
for img_obj in img_objs:
|
||||
assert "face" in img_obj.keys()
|
||||
assert "facial_area" in img_obj.keys()
|
||||
assert isinstance(img_obj["facial_area"], dict)
|
||||
assert "x" in img_obj["facial_area"].keys()
|
||||
assert "y" in img_obj["facial_area"].keys()
|
||||
assert "w" in img_obj["facial_area"].keys()
|
||||
assert "h" in img_obj["facial_area"].keys()
|
||||
assert "confidence" in img_obj.keys()
|
||||
|
||||
img = img_obj["face"]
|
||||
evaluate(img.shape[0] > 0 and img.shape[1] > 0)
|
||||
print(detector," test is done")
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
img_path = "dataset/img1.jpg"
|
||||
embedding = DeepFace.represent(img_path)
|
||||
print("Function returned ", len(embedding), "dimensional vector")
|
||||
evaluate(len(embedding) > 0)
|
||||
embedding_objs = DeepFace.represent(img_path)
|
||||
for embedding_obj in embedding_objs:
|
||||
embedding = embedding_obj["embedding"]
|
||||
print("Function returned ", len(embedding), "dimensional vector")
|
||||
evaluate(len(embedding) == 2622)
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
print("Face detectors test")
|
||||
print("Different face detectors on verification test")
|
||||
|
||||
for detector in detectors:
|
||||
print(detector + " detector")
|
||||
res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend = detector)
|
||||
|
||||
assert isinstance(res, dict)
|
||||
assert "verified" in res.keys()
|
||||
assert res["verified"] in [True, False]
|
||||
assert "distance" in res.keys()
|
||||
assert "threshold" in res.keys()
|
||||
assert "model" in res.keys()
|
||||
assert "detector_backend" in res.keys()
|
||||
assert "similarity_metric" in res.keys()
|
||||
assert "facial_areas" in res.keys()
|
||||
assert "img1" in res["facial_areas"].keys()
|
||||
assert "img2" in res["facial_areas"].keys()
|
||||
assert "x" in res["facial_areas"]["img1"].keys()
|
||||
assert "y" in res["facial_areas"]["img1"].keys()
|
||||
assert "w" in res["facial_areas"]["img1"].keys()
|
||||
assert "h" in res["facial_areas"]["img1"].keys()
|
||||
assert "x" in res["facial_areas"]["img2"].keys()
|
||||
assert "y" in res["facial_areas"]["img2"].keys()
|
||||
assert "w" in res["facial_areas"]["img2"].keys()
|
||||
assert "h" in res["facial_areas"]["img2"].keys()
|
||||
|
||||
print(res)
|
||||
assert res["verified"] == dataset[0][2]
|
||||
evaluate(res["verified"] == dataset[0][2])
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
print("Find function test")
|
||||
|
||||
df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset")
|
||||
print(df.head())
|
||||
evaluate(df.shape[0] > 0)
|
||||
dfs = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset")
|
||||
for df in dfs:
|
||||
assert isinstance(df, pd.DataFrame)
|
||||
print(df.head())
|
||||
evaluate(df.shape[0] > 0)
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
print("Facial analysis test. Passing nothing as an action")
|
||||
|
||||
img = "dataset/img4.jpg"
|
||||
demography = DeepFace.analyze(img)
|
||||
print(demography)
|
||||
|
||||
evaluate(demography["age"] > 20 and demography["age"] < 40)
|
||||
evaluate(demography["dominant_gender"] == "Woman")
|
||||
demography_objs = DeepFace.analyze(img)
|
||||
for demography in demography_objs:
|
||||
print(demography)
|
||||
evaluate(demography["age"] > 20 and demography["age"] < 40)
|
||||
evaluate(demography["dominant_gender"] == "Woman")
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
print("Facial analysis test. Passing all to the action")
|
||||
demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])
|
||||
demography_objs = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])
|
||||
|
||||
print("Demography:")
|
||||
print(demography)
|
||||
for demography in demography_objs:
|
||||
#print(f"Demography: {demography}")
|
||||
#check response is a valid json
|
||||
print("Age: ", demography["age"])
|
||||
print("Gender: ", demography["dominant_gender"])
|
||||
print("Race: ", demography["dominant_race"])
|
||||
print("Emotion: ", demography["dominant_emotion"])
|
||||
|
||||
#check response is a valid json
|
||||
print("Age: ", demography["age"])
|
||||
print("Gender: ", demography["dominant_gender"])
|
||||
print("Race: ", demography["dominant_race"])
|
||||
print("Emotion: ", demography["dominant_emotion"])
|
||||
|
||||
evaluate(demography.get("age") is not None)
|
||||
evaluate(demography.get("dominant_gender") is not None)
|
||||
evaluate(demography.get("dominant_race") is not None)
|
||||
evaluate(demography.get("dominant_emotion") is not None)
|
||||
evaluate(demography.get("age") is not None)
|
||||
evaluate(demography.get("dominant_gender") is not None)
|
||||
evaluate(demography.get("dominant_race") is not None)
|
||||
evaluate(demography.get("dominant_emotion") is not None)
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
print("Facial analysis test 2. Remove some actions and check they are not computed")
|
||||
demography = DeepFace.analyze(img, ['age', 'gender'])
|
||||
demography_objs = DeepFace.analyze(img, ['age', 'gender'])
|
||||
|
||||
print("Age: ", demography.get("age"))
|
||||
print("Gender: ", demography.get("dominant_gender"))
|
||||
print("Race: ", demography.get("dominant_race"))
|
||||
print("Emotion: ", demography.get("dominant_emotion"))
|
||||
for demography in demography_objs:
|
||||
print("Age: ", demography.get("age"))
|
||||
print("Gender: ", demography.get("dominant_gender"))
|
||||
print("Race: ", demography.get("dominant_race"))
|
||||
print("Emotion: ", demography.get("dominant_emotion"))
|
||||
|
||||
evaluate(demography.get("age") is not None)
|
||||
evaluate(demography.get("dominant_gender") is not None)
|
||||
evaluate(demography.get("dominant_race") is None)
|
||||
evaluate(demography.get("dominant_emotion") is None)
|
||||
evaluate(demography.get("age") is not None)
|
||||
evaluate(demography.get("dominant_gender") is not None)
|
||||
evaluate(demography.get("dominant_race") is None)
|
||||
evaluate(demography.get("dominant_emotion") is None)
|
||||
|
||||
print("-----------------------------------------")
|
||||
|
||||
@@ -175,11 +273,12 @@ def test_cases():

    print("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
-    resp_obj = DeepFace.analyze(img)
-    print(resp_obj)
-
-    evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
-    evaluate(resp_obj["gender"] == "Woman")
+    resp_objs = DeepFace.analyze(img)
+
+    for resp_obj in resp_objs:
+        print(resp_obj)
+        evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
+        evaluate(resp_obj["dominant_gender"] == "Woman")

    print("--------------------------")
@@ -190,7 +289,6 @@ def test_cases():

    res = DeepFace.verify(img1, img2)
    print(res)

    evaluate(res["verified"] == True)

    print("--------------------------")
@@ -199,11 +297,11 @@ def test_cases():

    img1 = cv2.imread("dataset/img1.jpg")

-    df = DeepFace.find(img1, db_path = "dataset")
-
-    print(df.head())
-
-    evaluate(df.shape[0] > 0)
+    dfs = DeepFace.find(img1, db_path = "dataset")
+
+    for df in dfs:
+        print(df.head())
+        evaluate(df.shape[0] > 0)

    print("--------------------------")
@@ -213,17 +311,18 @@ def test_cases():

    for img1_path, img2_path, verified in dataset:
        for detector in detectors:
-            result = DeepFace.analyze(img1_path, actions=('gender',), detector_backend=detector, enforce_detection=False)
-
-            print(result)
-
-            assert 'gender' in result.keys()
-            assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
-
-            if result["dominant_gender"] == "Man":
-                assert result["gender"]["Man"] > result["gender"]["Woman"]
-            else:
-                assert result["gender"]["Man"] < result["gender"]["Woman"]
+            results = DeepFace.analyze(img1_path, actions=('gender',), detector_backend=detector, enforce_detection=False)
+
+            for result in results:
+                print(result)
+
+                assert 'gender' in result.keys()
+                assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
+
+                if result["dominant_gender"] == "Man":
+                    assert result["gender"]["Man"] > result["gender"]["Woman"]
+                else:
+                    assert result["gender"]["Man"] < result["gender"]["Woman"]

    # ---------------------------------------------