mirror of https://github.com/serengil/deepface.git
synced 2025-06-05 19:15:23 +00:00
fix(represent): change something according to comments and adapt pylint
parent 8c6dad0147
commit 7b1451ac50
.vscode/settings.json (vendored): 7 changed lines

@@ -9,5 +9,10 @@
     "python.formatting.provider": "black",
     "python.formatting.blackArgs": ["--line-length=100"],
     "editor.fontWeight": "normal",
-    "python.analysis.extraPaths": ["./deepface"]
+    "python.analysis.extraPaths": [
+        "./deepface"
+    ],
+    "black-formatter.args": [
+        "--line-length=100"
+    ]
 }
@@ -686,7 +686,8 @@ def represent(
         {
             // Multidimensional vector
             // The number of dimensions is changing based on the reference model.
-            // E.g. FaceNet returns 128 dimensional vector; VGG-Face returns 2622 dimensional vector.
+            // E.g. FaceNet returns 128 dimensional vector;
+            // VGG-Face returns 2622 dimensional vector.
             "embedding": np.array,

             // Detected Facial-Area by Face detection in dict format.
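For orientation, a minimal usage sketch of the return shape this docstring describes; the image path is a placeholder, other arguments are left at their defaults, and the printed lengths follow the dimensions quoted in the docstring (128 for FaceNet, 2622 for VGG-Face):

from deepface import DeepFace

# "face.jpg" is a placeholder path; represent() returns one dict per detected face,
# each carrying the embedding described above plus the detected facial area.
for model_name in ["Facenet", "VGG-Face"]:
    results = DeepFace.represent(img_path="face.jpg", model_name=model_name)
    for result in results:
        print(model_name, len(result["embedding"]))  # e.g. Facenet -> 128, VGG-Face -> 2622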
@@ -716,9 +717,6 @@ def represent(
             align=align,
         )
     else:  # skip
-        if type(img_path).__module__ == np.__name__:
-            img = img_path.copy()
-        else:
-            # Try load. If load error, will raise exception internal
-            img, _ = functions.load_image(img_path)
+        # Try load. If load error, will raise exception internal
+        img, _ = functions.load_image(img_path)
         # --------------------------------
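The deleted branch duplicated a check that load_image already performs (a numpy input is returned untouched, as the last hunk below shows), so the skip path can call functions.load_image unconditionally. A hedged sketch of that pass-through behaviour, with the non-array branches elided:

import numpy as np

def load_image_passthrough(img):
    # Simplified stand-in for functions.load_image: a numpy array comes back as-is,
    # so callers no longer need their own __module__ check (the old code also made
    # a .copy(), which the simplified path drops).
    if type(img).__module__ == np.__name__:
        return img, None
    raise NotImplementedError("path / base64 loading elided in this sketch")

img_path = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder in-memory image
img, _ = load_image_passthrough(img_path)
print(img.shape)  # (224, 224, 3)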
@@ -726,10 +724,10 @@ def represent(
             img = img[0]  # e.g. (1, 224, 224, 3) to (224, 224, 3)
         if len(img.shape) == 3:
             img = cv2.resize(img, target_size)
-            img = np.expand_dims(img, axis=0)  # Why we remove a axis=0 previously and here expand one?
-        # when represent is called from verify, this is already normalized. But needed when user given.
+            img = np.expand_dims(img, axis=0)
+        # when called from verify, this is already normalized. But needed when user given.
         if img.max() > 1:
-            img = img.astype(np.float32) / 255.
+            img = img.astype(np.float32) / 255.0
         # --------------------------------
         # make dummy region and confidence to keep compatibility with `extract_faces`
         img_region = {"x": 0, "y": 0, "w": img.shape[1], "h": img.shape[2]}
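These lines are the skip-path preprocessing: drop a leading batch axis if present, resize to the model's input size, restore the batch axis, and scale to [0, 1] only when the input is not already normalized. A small sketch of the same pipeline, assuming a 224x224 target size (the real value comes from the selected model):

import cv2
import numpy as np

def preprocess_sketch(img, target_size=(224, 224)):
    # Hedged sketch of the skip-path preprocessing in represent();
    # target_size normally comes from the chosen model, 224x224 is an assumption.
    if len(img.shape) == 4:
        img = img[0]                       # (1, H, W, 3) -> (H, W, 3)
    if len(img.shape) == 3:
        img = cv2.resize(img, target_size)
        img = np.expand_dims(img, axis=0)  # back to a batch of one
    if img.max() > 1:                      # already-normalized input is left alone
        img = img.astype(np.float32) / 255.0
    return img

batch = preprocess_sketch(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
print(batch.shape, batch.max())  # (1, 224, 224, 3) and a value <= 1.0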
@@ -94,13 +94,8 @@ def load_image(img):
     if type(img).__module__ == np.__name__:
         return img, None

-    try:
-        # Test whether img is a Python3's Path. If hit, tranform to str to let following logic work.
-        from pathlib import Path
-        if isinstance(img, Path):
-            img = str(img)
-    except ImportError:
-        pass
+    if isinstance(img, Path):
+        img = str(img)

     # The image is a base64 string
     if img.startswith("data:image/"):
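pathlib is part of the standard library on every supported Python 3, so the ImportError guard was dead weight; the surviving check only normalizes a pathlib.Path argument to str before the string-based branches (such as the base64 prefix test) run. A hedged sketch with a hypothetical helper name and a placeholder path:

from pathlib import Path  # always available on Python 3; no try/except needed

def normalize_img_argument(img):
    # Hypothetical helper mirroring the kept lines of load_image:
    # convert a pathlib.Path to str so later str-only checks like
    # img.startswith("data:image/") keep working unchanged.
    if isinstance(img, Path):
        img = str(img)
    return img

print(normalize_img_argument(Path("some_dir") / "face.jpg"))  # "some_dir/face.jpg" on POSIX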