diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index fa4bc86..f9fee0b 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1 +1,2 @@
+github: serengil
 patreon: serengil?repo=deepface
diff --git a/README.md b/README.md
index 344ee9b..029ab4c 100644
--- a/README.md
+++ b/README.md
@@ -229,7 +229,9 @@ Face recognition models are actually CNN models and they expect standard sized i
 
 The performance of RetinaFace is very satisfactory even in the crowd as seen in the following illustration. Besides, it comes with an incredible facial landmark detection performance. Highlighted red points show some facial landmarks such as eyes, nose and mouth. That's why, alignment score of RetinaFace is high as well.
 
-<p align="center"><img src="..."></p>
+<p align="center"><img src="...">
+<br>The Yellow Angels - Fenerbahce Women's Volleyball Team
+</p>
 
 You can find out more about RetinaFace on this [repo](https://github.com/serengil/retinaface).
 
@@ -306,7 +308,7 @@ You can use deepface not just for facial recognition tasks. It's very common to

-**Celebrity Look-Alike Prediction** - [`Vlog`](https://youtu.be/RMgIKU1H8DY), [`Tutorial`](https://sefiks.com/2019/05/05/celebrity-look-alike-face-recognition-with-deep-learning-in-keras/)
+**Celebrity Look-Alike Prediction** - [`Vlog`](https://youtu.be/jaxkEn-Kieo), [`Tutorial`](https://sefiks.com/2019/05/05/celebrity-look-alike-face-recognition-with-deep-learning-in-keras/)

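As a side note to the README change above, here is a minimal usage sketch (illustrative only, not part of the patch) showing how the RetinaFace backend described in that section is selected. It assumes the `DeepFace.detectFace` helper that ships with deepface releases of this era, and `img1.jpg` is a placeholder file name:

```python
from deepface import DeepFace

# Detect and align the face in a placeholder image with the RetinaFace backend.
# detectFace returns the cropped, aligned face as a numpy array.
face = DeepFace.detectFace(
    img_path="img1.jpg",
    detector_backend="retinaface",
    align=True,
)
print(face.shape)
```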
diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
index 9a038a6..ee57f72 100644
--- a/deepface/DeepFace.py
+++ b/deepface/DeepFace.py
@@ -71,9 +71,10 @@ def build_model(model_name):
 
 def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'opencv', distance_metric = 'cosine', enforce_detection = True, align = True, normalization = 'base'):
 
     """
-    This function verifies an image pair is same person or different persons.
+    This function verifies whether an image pair shows the same person or different persons. In the background, the verification function represents facial images as vectors and then calculates the similarity between those vectors. Vectors of same-person images should have higher similarity (or lower distance) than vectors of different persons.
 
     Parameters:
         img1_path, img2_path: exact image path as string. numpy array (BGR) or based64 encoded images are also welcome. If one of pair has more than one face, then we will compare the face pair with max similarity.
+
+        If you do not want to call the verify function in a for loop, then you can pass a list of image pairs instead, e.g.
+
+            img1_path = [
+                ['img1.jpg', 'img2.jpg'],
+                ['img2.jpg', 'img3.jpg']
+            ]
@@ -83,8 +84,26 @@ def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'op
         enforce_detection (boolean): If no face could not be detected in an image, then this function will return exception by default. Set this to False not to have this exception. This might be convenient for low resolution images.
 
-        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib
+        detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
 
     Returns:
         Verify function returns a dictionary. If img1_path is a list of image pairs, then the function will return list of dictionary.
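To illustrate the verify docstring updated above, a short hedged usage sketch (file names are placeholders; the result keys follow the dictionary described in the docstring):

```python
from deepface import DeepFace

# Compare a single pair of placeholder images with a non-default model and metric.
result = DeepFace.verify(
    img1_path="img1.jpg",
    img2_path="img2.jpg",
    model_name="Facenet512",
    distance_metric="euclidean_l2",
)
print(result["verified"], result["distance"])

# Per the docstring, img1_path may also be a list of [img1, img2] pairs,
# in which case a list of such dictionaries is returned.
```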
@@ -188,18 +207,40 @@ def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'op
 
 def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , enforce_detection = True, detector_backend = 'opencv', align = True, silent = False):
 
     """
-    This function analyzes facial attributes including age, gender, emotion and race
+    This function analyzes facial attributes including age, gender, emotion and race. In the background, the analysis function builds convolutional neural network models to classify the age, gender, emotion and race of the input image.
 
     Parameters:
         img_path: exact image path, numpy array (BGR) or base64 encoded image could be passed.
 
         actions (tuple): The default is ('age', 'gender', 'emotion', 'race'). You can drop some of those attributes.
 
         enforce_detection (boolean): The function throws exception if no face detected by default. Set this to False if you don't want to get exception. This might be convenient for low resolution images.
 
-        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib.
+        detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe.
 
         silent (boolean): disable (some) log messages
 
     Returns:
         The function returns a list of dictionaries for each face appearing in the image.
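A hedged usage sketch for the analyze docstring above (the image path is a placeholder, and the printed keys assume deepface's usual per-face output):

```python
from deepface import DeepFace

# Analyze a placeholder image; unneeded actions can be dropped from the tuple.
objs = DeepFace.analyze(
    img_path="img1.jpg",
    actions=("age", "gender", "emotion", "race"),
    detector_backend="opencv",
    silent=True,
)

# Per the docstring, one dictionary is returned per face appearing in the image.
for obj in objs:
    print(obj["age"], obj["dominant_emotion"], obj["dominant_race"])
```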
@@ -334,13 +375,13 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
 
         db_path (string): You should store some .jpg files in a folder and pass the exact folder path to this.
 
-        model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID, Dlib or Ensemble
+        model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
 
         distance_metric (string): cosine, euclidean, euclidean_l2
 
         enforce_detection (boolean): The function throws exception if a face could not be detected. Set this to True if you don't want to get exception. This might be convenient for low resolution images.
 
-        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib
+        detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
 
         silent (boolean): disable some logging and progress bars
@@ -494,16 +535,23 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
 
 def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection = True, detector_backend = 'opencv', align = True, normalization = 'base'):
 
     """
-    This function represents facial images as vectors.
+    This function represents facial images as vectors. The function uses convolutional neural network models to generate vector embeddings.
 
     Parameters:
-        img_path: exact image path, numpy array (BGR) or based64 encoded images could be passed.
+        img_path (string): exact image path. Alternatively, numpy array (BGR) or base64 encoded images could be passed.
 
-        model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID, Dlib, ArcFace.
+        model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
+
+        model: pre-built deepface model. A face recognition model is built in every call of the represent function. You can optionally pass a pre-built model if you are going to call represent several times, e.g. in a for loop.
+
+            model = DeepFace.build_model('VGG-Face')
 
         enforce_detection (boolean): If any face could not be detected in an image, then verify function will return exception. Set this to False not to have this exception. This might be convenient for low resolution images.
 
-        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib
+        detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
 
         normalization (string): normalize the input image before feeding to model
@@ -564,9 +612,9 @@ def stream(db_path = '', model_name ='VGG-Face', detector_backend = 'opencv', di
 
     Parameters:
         db_path (string): facial database path. You should store some .jpg files in this folder.
 
-        model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID, Dlib or Ensemble
+        model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
 
-        detector_backend (string): opencv, ssd, mtcnn, dlib, retinaface
+        detector_backend (string): opencv, retinaface, mtcnn, ssd, dlib or mediapipe
 
         distance_metric (string): cosine, euclidean, euclidean_l2
diff --git a/tests/unit_tests.py b/tests/unit_tests.py
index 30d6196..8add53b 100644
--- a/tests/unit_tests.py
+++ b/tests/unit_tests.py
@@ -34,7 +34,7 @@ def evaluate(condition):
 
 # ------------------------------------------------
 
-detectors = ['opencv', 'mtcnn', 'retinaface']
+detectors = ['opencv', 'mtcnn']
 
 models = ['VGG-Face', 'Facenet', 'Facenet512', 'ArcFace', 'SFace']
 metrics = ['cosine', 'euclidean', 'euclidean_l2']
@@ -241,4 +241,4 @@ if test_score > expected_coverage:
 else:
     print("min required test coverage is NOT satisfied")
 
-assert test_score > expected_coverage
\ No newline at end of file
+assert test_score > expected_coverage
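Finally, a hedged sketch of calling the find and represent functions whose docstrings are touched in this patch; the folder and file names are placeholders and the exact return structure depends on the deepface version:

```python
from deepface import DeepFace

# Search a placeholder image against a folder of .jpg identity images.
dfs = DeepFace.find(
    img_path="img1.jpg",
    db_path="my_db",
    model_name="ArcFace",
    distance_metric="cosine",
)
print(dfs)  # matching identities, returned as pandas dataframe(s)

# Turn a facial image into its vector embedding with the same model.
embedding_obj = DeepFace.represent(img_path="img1.jpg", model_name="ArcFace")
```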