single and batch distance functions are stored in verify module

Sefik Ilkin Serengil 2024-10-06 20:40:33 +01:00
parent 78cd70e058
commit a93fb63c97
4 changed files with 113 additions and 114 deletions
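For orientation, here is a minimal sketch (not part of the diff) of how the consolidated verification.find_distance is intended to behave after this change: two 1-D embeddings yield a single np.float64 distance, while 2-D batches of shape (N, D) and (M, D) yield an (M, N) distance matrix. The random vectors and the 128-dimensional embedding size are placeholders.

```python
import numpy as np
from deepface.modules import verification

# single pair: two 1-D embeddings -> one scalar distance
a = np.random.rand(128)
b = np.random.rand(128)
print(verification.find_distance(a, b, "cosine"))  # np.float64

# batch: (N, D) sources vs (M, D) targets -> (M, N) distance matrix
sources = np.random.rand(10, 128)  # N = 10
targets = np.random.rand(3, 128)   # M = 3
dists = verification.find_distance(sources, targets, "cosine")
assert dists.shape == (3, 10)
```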

View File

@@ -364,7 +364,7 @@ def find(
         silent=silent,
         refresh_database=refresh_database,
         anti_spoofing=anti_spoofing,
-        batched=batched
+        batched=batched,
     )

View File

@@ -105,7 +105,7 @@ def download_all_models_in_one_shot() -> None:
     Download all model weights in one shot
     """
-    # weight urls as variables
+    # import model weights from module here to avoid circular import issue
     from deepface.models.facial_recognition.VGGFace import WEIGHTS_URL as VGGFACE_WEIGHTS
     from deepface.models.facial_recognition.Facenet import FACENET128_WEIGHTS, FACENET512_WEIGHTS
     from deepface.models.facial_recognition.OpenFace import WEIGHTS_URL as OPENFACE_WEIGHTS

View File

@@ -266,7 +266,7 @@ def find(
         align,
         threshold,
         normalization,
-        anti_spoofing
+        anti_spoofing,
     )
     df = pd.DataFrame(representations)
@@ -441,6 +441,7 @@ def __find_bulk_embeddings(
     return representations
 
+
 def find_batched(
     representations: List[Dict[str, Any]],
     source_objs: List[Dict[str, Any]],
@@ -508,27 +509,24 @@ def find_batched(
     metadata = set()
 
     for item in representations:
-        emb = item.get('embedding')
+        emb = item.get("embedding")
         if emb is not None:
             embeddings_list.append(emb)
             valid_mask.append(True)
         else:
-            embeddings_list.append(np.zeros_like(representations[0]['embedding']))
+            embeddings_list.append(np.zeros_like(representations[0]["embedding"]))
             valid_mask.append(False)
         metadata.update(item.keys())
 
     # remove embedding key from other keys
-    metadata.discard('embedding')
+    metadata.discard("embedding")
     metadata = list(metadata)
 
     embeddings = np.array(embeddings_list)  # (N, D)
     valid_mask = np.array(valid_mask)  # (N,)
-    data = {
-        key: np.array([item.get(key, None) for item in representations])
-        for key in metadata
-    }
+    data = {key: np.array([item.get(key, None) for item in representations]) for key in metadata}
 
     target_embeddings = []
     source_regions = []
@@ -558,101 +556,46 @@ def find_batched(
         target_threshold = threshold or verification.find_threshold(model_name, distance_metric)
         target_thresholds.append(target_threshold)
 
     target_embeddings = np.array(target_embeddings)  # (M, D)
     target_thresholds = np.array(target_thresholds)  # (M,)
     source_regions_arr = {
-        'source_x': np.array([region['x'] for region in source_regions]),
-        'source_y': np.array([region['y'] for region in source_regions]),
-        'source_w': np.array([region['w'] for region in source_regions]),
-        'source_h': np.array([region['h'] for region in source_regions]),
+        "source_x": np.array([region["x"] for region in source_regions]),
+        "source_y": np.array([region["y"] for region in source_regions]),
+        "source_w": np.array([region["w"] for region in source_regions]),
+        "source_h": np.array([region["h"] for region in source_regions]),
     }
 
-    def find_cosine_distance_batch(
-        embeddings: np.ndarray, target_embeddings: np.ndarray
-    ) -> np.ndarray:
-        """
-        Find the cosine distances between batches of embeddings
-        Args:
-            embeddings (np.ndarray): array of shape (N, D)
-            target_embeddings (np.ndarray): array of shape (M, D)
-        Returns:
-            np.ndarray: distance matrix of shape (M, N)
-        """
-        embeddings_norm = verification.l2_normalize(embeddings, axis=1)
-        target_embeddings_norm = verification.l2_normalize(target_embeddings, axis=1)
-        cosine_similarities = np.dot(target_embeddings_norm, embeddings_norm.T)
-        cosine_distances = 1 - cosine_similarities
-        return cosine_distances
-
-    def find_euclidean_distance_batch(
-        embeddings: np.ndarray, target_embeddings: np.ndarray
-    ) -> np.ndarray:
-        """
-        Find the Euclidean distances between batches of embeddings
-        Args:
-            embeddings (np.ndarray): array of shape (N, D)
-            target_embeddings (np.ndarray): array of shape (M, D)
-        Returns:
-            np.ndarray: distance matrix of shape (M, N)
-        """
-        diff = embeddings[None, :, :] - target_embeddings[:, None, :]  # (M, N, D)
-        distances = np.linalg.norm(diff, axis=2)  # (M, N)
-        return distances
-
-    def find_distance_batch(
-        embeddings: np.ndarray, target_embeddings: np.ndarray, distance_metric: str,
-    ) -> np.ndarray:
-        """
-        Find pairwise distances between batches of embeddings using the specified distance metric
-        Args:
-            embeddings (np.ndarray): array of shape (N, D)
-            target_embeddings (np.ndarray): array of shape (M, D)
-            distance_metric (str): distance metric ('cosine', 'euclidean', 'euclidean_l2')
-        Returns:
-            np.ndarray: distance matrix of shape (M, N)
-        """
-        if distance_metric == "cosine":
-            distances = find_cosine_distance_batch(embeddings, target_embeddings)
-        elif distance_metric == "euclidean":
-            distances = find_euclidean_distance_batch(embeddings, target_embeddings)
-        elif distance_metric == "euclidean_l2":
-            embeddings_norm = verification.l2_normalize(embeddings, axis=1)
-            target_embeddings_norm = verification.l2_normalize(target_embeddings, axis=1)
-            distances = find_euclidean_distance_batch(embeddings_norm, target_embeddings_norm)
-        else:
-            raise ValueError("Invalid distance_metric passed - ", distance_metric)
-        return np.round(distances, 6)
-
-    distances = find_distance_batch(embeddings, target_embeddings, distance_metric)  # (M, N)
+    distances = verification.find_distance(embeddings, target_embeddings, distance_metric)  # (M, N)
     distances[:, ~valid_mask] = np.inf
 
     resp_obj = []
     for i in range(len(target_embeddings)):
         target_distances = distances[i]  # (N,)
         target_threshold = target_thresholds[i]
 
         N = embeddings.shape[0]
         result_data = dict(data)
-        result_data.update({
-            'source_x': np.full(N, source_regions_arr['source_x'][i]),
-            'source_y': np.full(N, source_regions_arr['source_y'][i]),
-            'source_w': np.full(N, source_regions_arr['source_w'][i]),
-            'source_h': np.full(N, source_regions_arr['source_h'][i]),
-            'threshold': np.full(N, target_threshold),
-            'distance': target_distances,
-        })
+        result_data.update(
+            {
+                "source_x": np.full(N, source_regions_arr["source_x"][i]),
+                "source_y": np.full(N, source_regions_arr["source_y"][i]),
+                "source_w": np.full(N, source_regions_arr["source_w"][i]),
+                "source_h": np.full(N, source_regions_arr["source_h"][i]),
+                "threshold": np.full(N, target_threshold),
+                "distance": target_distances,
+            }
+        )
 
         mask = target_distances <= target_threshold
         filtered_data = {key: value[mask] for key, value in result_data.items()}
 
-        sorted_indices = np.argsort(filtered_data['distance'])
+        sorted_indices = np.argsort(filtered_data["distance"])
         sorted_data = {key: value[sorted_indices] for key, value in filtered_data.items()}
 
-        num_results = len(sorted_data['distance'])
+        num_results = len(sorted_data["distance"])
         result_dicts = [
-            {key: sorted_data[key][i] for key in sorted_data}
-            for i in range(num_results)
+            {key: sorted_data[key][i] for key in sorted_data} for i in range(num_results)
         ]
         resp_obj.append(result_dicts)
     return resp_obj
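A hedged usage sketch of the batched lookup that this hunk feeds into. The image and database paths are placeholders; "distance" and "threshold" come from the loop above, while other keys such as "identity" are assumed to be whatever was stored in the representations.

```python
from deepface import DeepFace

# batched=True routes find() through find_batched(), returning a list with one
# entry per detected face, each entry being a list of match dicts
results = DeepFace.find(
    img_path="group_photo.jpg",  # placeholder image
    db_path="face_db",           # placeholder database folder
    distance_metric="cosine",
    batched=True,
)

for face_matches in results:
    for match in face_matches:
        # "identity" is an assumed representation key; adjust to what your db stores
        print(match.get("identity"), match["distance"], match["threshold"])
```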

View File

@@ -263,14 +263,16 @@ def __extract_faces_and_embeddings(
 def find_cosine_distance(
     source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
-) -> np.float64:
+) -> Union[np.float64, np.ndarray]:
     """
     Find cosine distance between two given vectors
     Args:
         source_representation (np.ndarray or list): 1st vector
         test_representation (np.ndarray or list): 2nd vector
     Returns
-        distance (np.float64): calculated cosine distance
+        distance (np.float64 or np.ndarray): calculated cosine distance(s).
+            it is type of np.float64 for given single embeddings
+            or type of np.ndarray for given batch embeddings
     """
     if isinstance(source_representation, list):
         source_representation = np.array(source_representation)
@@ -278,22 +280,41 @@ def find_cosine_distance(
     if isinstance(test_representation, list):
         test_representation = np.array(test_representation)
 
-    a = np.dot(source_representation, test_representation)
-    b = np.linalg.norm(source_representation)
-    c = np.linalg.norm(test_representation)
-    return 1 - a / (b * c)
+    if len(source_representation.shape) == 1 and len(test_representation.shape) == 1:
+        # single embedding
+        a = np.dot(source_representation, test_representation)
+        b = np.linalg.norm(source_representation)
+        c = np.linalg.norm(test_representation)
+        distances = 1 - a / (b * c)
+    elif len(source_representation.shape) == 2 and len(test_representation.shape) == 2:
+        # list of embeddings (batch)
+        # source_representation's shape is (N, D)
+        # test_representation's shape is (M, D)
+        # distances' shape is (M, N)
+        source_embeddings_norm = l2_normalize(source_representation, axis=1)
+        test_embeddings_norm = l2_normalize(test_representation, axis=1)
+        cosine_similarities = np.dot(test_embeddings_norm, source_embeddings_norm.T)
+        distances = 1 - cosine_similarities
+    else:
+        raise ValueError(
+            "embeddings can either be 1 or 2 dimensional "
+            f"but it is {len(source_representation.shape)} & {len(test_representation.shape)}"
+        )
+    return distances
 
 
 def find_euclidean_distance(
     source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
-) -> np.float64:
+) -> Union[np.float64, np.ndarray]:
     """
     Find euclidean distance between two given vectors
     Args:
         source_representation (np.ndarray or list): 1st vector
         test_representation (np.ndarray or list): 2nd vector
     Returns
-        distance (np.float64): calculated euclidean distance
+        distance (np.float64 or np.ndarray): calculated euclidean distance(s).
+            it is type of np.float64 for given single embeddings
+            or type of np.ndarray for given batch embeddings
     """
     if isinstance(source_representation, list):
         source_representation = np.array(source_representation)
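As a quick sanity check (a sketch, not part of the commit), the batched cosine branch above should agree with the single-pair branch entry by entry: batch[i, j] is the distance between test vector i and source vector j. The random embeddings are placeholders.

```python
import numpy as np
from deepface.modules import verification

sources = np.random.rand(4, 128)   # (N, D)
targets = np.random.rand(3, 128)   # (M, D)

batch = verification.find_cosine_distance(sources, targets)  # (M, N)

for i in range(targets.shape[0]):
    for j in range(sources.shape[0]):
        single = verification.find_cosine_distance(sources[j], targets[i])
        assert np.isclose(batch[i, j], single, atol=1e-6)
```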
@@ -301,7 +322,23 @@ def find_euclidean_distance(
     if isinstance(test_representation, list):
         test_representation = np.array(test_representation)
 
-    return np.linalg.norm(source_representation - test_representation)
+    if len(source_representation.shape) == 1 and len(test_representation.shape) == 1:
+        # single embedding
+        diff = source_representation - test_representation
+        distances = np.linalg.norm(diff)
+    elif len(source_representation.shape) == 2 and len(test_representation.shape) == 2:
+        # list of embeddings (batch)
+        # source_representation's shape is (N, D)
+        # test_representation's shape is (M, D)
+        # distances' shape is (M, N)
+        diff = source_representation[None, :, :] - test_representation[:, None, :]  # (M, N, D)
+        distances = np.linalg.norm(diff, axis=2)  # (M, N)
+    else:
+        raise ValueError(
+            "embeddings can either be 1 or 2 dimensional "
+            f"but it is {len(source_representation.shape)} & {len(test_representation.shape)}"
+        )
+    return distances
 
 
 def l2_normalize(
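The batched euclidean branch relies on NumPy broadcasting. A standalone sketch of the same shape manipulation, checked against an explicit double loop (the toy arrays are placeholders):

```python
import numpy as np

sources = np.random.rand(4, 3)   # (N, D)
targets = np.random.rand(2, 3)   # (M, D)

# (1, N, D) - (M, 1, D) broadcasts to (M, N, D); the norm over D gives (M, N)
diff = sources[None, :, :] - targets[:, None, :]
dists = np.linalg.norm(diff, axis=2)

expected = np.array([[np.linalg.norm(s - t) for s in sources] for t in targets])
assert np.allclose(dists, expected)
```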
@@ -325,22 +362,41 @@ def find_distance(
     alpha_embedding: Union[np.ndarray, list],
     beta_embedding: Union[np.ndarray, list],
     distance_metric: str,
-) -> np.float64:
+) -> Union[np.float64, np.ndarray]:
     """
     Wrapper to find distance between vectors according to the given distance metric
     Args:
         source_representation (np.ndarray or list): 1st vector
         test_representation (np.ndarray or list): 2nd vector
     Returns
-        distance (np.float64): calculated cosine distance
+        distance (np.float64 or np.ndarray): calculated cosine distance(s).
+            it is type of np.float64 for given single embeddings
+            or type of np.ndarray for given batch embeddings
     """
+    if isinstance(alpha_embedding, list):
+        alpha_embedding = np.array(alpha_embedding)
+
+    if isinstance(beta_embedding, list):
+        beta_embedding = np.array(beta_embedding)
+
     if distance_metric == "cosine":
         distance = find_cosine_distance(alpha_embedding, beta_embedding)
     elif distance_metric == "euclidean":
         distance = find_euclidean_distance(alpha_embedding, beta_embedding)
     elif distance_metric == "euclidean_l2":
+        if len(alpha_embedding.shape) == 1 and len(beta_embedding.shape) == 1:
+            # single embedding
+            axis = None
+        elif len(alpha_embedding.shape) == 2 and len(beta_embedding.shape) == 2:
+            # list of embeddings (batch)
+            axis = 1
+        else:
+            raise ValueError(
+                "embeddings can either be 1 or 2 dimensional "
+                f"but it is {len(alpha_embedding.shape)} & {len(beta_embedding.shape)}"
+            )
         distance = find_euclidean_distance(
-            l2_normalize(alpha_embedding), l2_normalize(beta_embedding)
+            l2_normalize(alpha_embedding, axis=axis), l2_normalize(beta_embedding, axis=axis)
         )
     else:
         raise ValueError("Invalid distance_metric passed - ", distance_metric)
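For euclidean_l2 the vectors are unit-normalized before the euclidean distance is taken, so it is tied to the cosine distance by ||a - b||^2 = 2 * (1 - cos(a, b)). A small check of that relation (a sketch, assuming both metrics are computed by verification.find_distance as above; the random embeddings are placeholders):

```python
import numpy as np
from deepface.modules import verification

a = np.random.rand(128)
b = np.random.rand(128)

cos = verification.find_distance(a, b, "cosine")
l2 = verification.find_distance(a, b, "euclidean_l2")

# for unit vectors, squared euclidean distance is twice the cosine distance
assert np.isclose(l2, np.sqrt(2 * cos), atol=1e-4)
```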