mirror of https://github.com/serengil/deepface.git
synced 2025-06-07 03:55:21 +00:00
bug fix after pr
This commit is contained in:
parent ba1cb93646
commit 36ef4dc3f1
@@ -8,7 +8,7 @@

<p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-icon-labeled.png" width="200" height="240"></p>

Deepface is a lightweight [face recognition](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/) and facial attribute analysis ([age](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [gender](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [emotion](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) and [race](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/)) framework for python. It is a hybrid face recognition framework wrapping **state-of-the-art** models: [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`Google FaceNet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/), [`Facebook DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/), [`DeepID`](https://sefiks.com/2020/06/16/face-recognition-with-deepid-in-keras/), [`ArcFace`](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/) and [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/).

Experiments show that human beings have 97.53% accuracy on facial recognition tasks whereas those models already reached and passed that accuracy level.
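As a quick orientation for readers of this diff, here is a minimal usage sketch of the framework described above. The image paths are placeholders, and the exact keyword arguments may differ slightly between deepface releases.

```python
# Minimal usage sketch of the deepface API summarised above.
# "img1.jpg" and "img2.jpg" are placeholder paths, not files shipped with the repo.
from deepface import DeepFace

# Face verification: decide whether two photos show the same person.
result = DeepFace.verify("img1.jpg", "img2.jpg", model_name="VGG-Face")
print(result["verified"])

# Facial attribute analysis: apparent age, gender, emotion and race.
analysis = DeepFace.analyze("img1.jpg", actions=["age", "gender", "emotion", "race"])
print(analysis)
```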
@@ -195,7 +195,7 @@ Pull requests are welcome. You should run the unit tests locally by running [`te

## Support

-There are many ways to support a project - starring⭐️ the GitHub repo is just one.
+There are many ways to support a project - starring⭐️ the GitHub repo is just one 🙏.

## Citation
@@ -66,23 +66,22 @@ def loadBase64Img(uri):

	return img

def load_image(img):
-	exact_image = False
+	exact_image = False; base64_img = False; url_img = False

	if type(img).__module__ == np.__name__:
		exact_image = True

-	base64_img = False
-	if len(img) > 11 and img[0:11] == "data:image/":
+	elif len(img) > 11 and img[0:11] == "data:image/":
		base64_img = True

-	url_img = False
-	if len(img) > 11 and img.startswith("http"):
+	elif len(img) > 11 and img.startswith("http"):
		url_img = True

	#---------------------------

	if base64_img == True:
		img = loadBase64Img(img)

	elif url_img:
		img = np.array(Image.open(requests.get(img, stream=True).raw))
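The fix above turns three independent `if` checks into a single `if`/`elif` chain, so an input is classified exactly once as an in-memory numpy array, a base64 data URI, or a URL. Below is a standalone sketch of that classification logic; `classify_image_input` and the final file-path fallback are illustrative, not part of the library's API.

```python
# Standalone sketch of the input classification that the fix above makes
# mutually exclusive. classify_image_input is an illustrative name, not a
# deepface function; the "path" fallback is assumed for anything else.
import numpy as np

def classify_image_input(img):
    if type(img).__module__ == np.__name__:
        return "exact"    # already a decoded numpy array
    elif len(img) > 11 and img[0:11] == "data:image/":
        return "base64"   # e.g. "data:image/jpeg;base64,..."
    elif len(img) > 11 and img.startswith("http"):
        return "url"      # remote image, downloaded on demand
    return "path"         # otherwise treat it as a local file path

print(classify_image_input(np.zeros((2, 2, 3), dtype=np.uint8)))   # exact
print(classify_image_input("data:image/png;base64,iVBORw0KGgo="))  # base64
print(classify_image_input("https://example.com/face.jpg"))        # url
print(classify_image_input("tests/dataset/img1.jpg"))               # path
```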
@@ -196,15 +195,15 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete

	#resize image to expected shape

	# img = cv2.resize(img, target_size) #resize causes transformation on base image, adding black pixels to resize will not deform the base image

	if img.shape[0] > 0 and img.shape[1] > 0:
		factor_0 = target_size[0] / img.shape[0]
		factor_1 = target_size[1] / img.shape[1]
		factor = min(factor_0, factor_1)

		dsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))
		img = cv2.resize(img, dsize)

		# Then pad the other side to the target size by adding black pixels
		diff_0 = target_size[0] - img.shape[0]
		diff_1 = target_size[1] - img.shape[1]
@@ -213,9 +212,9 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete

			img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')
		else:
			img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')

	#------------------------------------------

	#double check: if target image is not still the same size with target.
	if img.shape[0:2] != target_size:
		img = cv2.resize(img, target_size)
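Taken together, the two hunks above resize the detected face so it fits inside target_size without distorting it, pad the remaining border with black pixels, and finally force the exact target shape. Below is a standalone sketch of that technique; `resize_and_pad` is an illustrative name rather than the library's function, and it folds the grayscale/colour padding branches into a single `ndim` check.

```python
# Standalone sketch of the resize-and-pad preprocessing shown in the diff above.
# resize_and_pad is an illustrative name, not a deepface function.
import cv2
import numpy as np

def resize_and_pad(img, target_size=(224, 224)):
    if img.shape[0] > 0 and img.shape[1] > 0:
        # Scale factor that keeps the aspect ratio and fits inside target_size.
        factor = min(target_size[0] / img.shape[0], target_size[1] / img.shape[1])
        dsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))  # (width, height) for cv2
        img = cv2.resize(img, dsize)

        # Pad the remaining border with black pixels, keeping the face centred.
        diff_0 = target_size[0] - img.shape[0]
        diff_1 = target_size[1] - img.shape[1]
        pad = ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2))
        if img.ndim == 3:
            pad = pad + ((0, 0),)  # do not pad the colour channels
        img = np.pad(img, pad, 'constant')

    # Double check: force the exact target size if rounding left it one pixel off.
    if img.shape[0:2] != target_size:
        img = cv2.resize(img, target_size)
    return img

face = np.random.randint(0, 255, size=(300, 180, 3), dtype=np.uint8)  # dummy face crop
print(resize_and_pad(face).shape)  # -> (224, 224, 3)
```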