diff --git a/deepface/modules/detection.py b/deepface/modules/detection.py
index 1e96b05..fc9ef6c 100644
--- a/deepface/modules/detection.py
+++ b/deepface/modules/detection.py
@@ -210,6 +210,5 @@ def align_face(
         return img, 0
 
     angle = float(np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])))
-    img = Image.fromarray(img)
-    img = np.array(img.rotate(angle))
+    img = np.array(Image.fromarray(img).rotate(angle))
     return img, angle
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py
index f9c6464..8cd603c 100644
--- a/deepface/modules/modeling.py
+++ b/deepface/modules/modeling.py
@@ -40,11 +40,10 @@ def build_model(model_name: str) -> Any:
     if not "model_obj" in globals():
         model_obj = {}
 
-    if not model_name in model_obj:
+    if not model_name in model_obj.keys():
         model = models.get(model_name)
         if model:
-            model = model()
-            model_obj[model_name] = model
+            model_obj[model_name] = model()
         else:
             raise ValueError(f"Invalid model_name passed - {model_name}")
 
diff --git a/deepface/modules/realtime.py b/deepface/modules/realtime.py
index b87377f..8abbda8 100644
--- a/deepface/modules/realtime.py
+++ b/deepface/modules/realtime.py
@@ -68,9 +68,8 @@ def analysis(
 
    cap = cv2.VideoCapture(source)  # webcam
    while True:
-        _, img = cap.read()
-
-        if img is None:
+        has_frame, img = cap.read()
+        if not has_frame:
            break
 
        # cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
@@ -92,6 +91,8 @@
            faces = []
            for face_obj in face_objs:
                facial_area = face_obj["facial_area"]
+                if facial_area["w"] <= 130:  # discard small detected faces
+                    continue
                faces.append(
                    (
                        facial_area["x"],
@@ -111,36 +112,32 @@
            detected_faces = []
            face_index = 0
            for x, y, w, h in faces:
-                if w > 130:  # discard small detected faces
+                face_detected = True
+                if face_index == 0:
+                    face_included_frames += 1  # increase frame for a single face
 
-                    face_detected = True
-                    if face_index == 0:
-                        face_included_frames = (
-                            face_included_frames + 1
-                        )  # increase frame for a single face
+                cv2.rectangle(
+                    img, (x, y), (x + w, y + h), (67, 67, 67), 1
+                )  # draw rectangle to main image
 
-                    cv2.rectangle(
-                        img, (x, y), (x + w, y + h), (67, 67, 67), 1
-                    )  # draw rectangle to main image
+                cv2.putText(
+                    img,
+                    str(frame_threshold - face_included_frames),
+                    (int(x + w / 4), int(y + h / 1.5)),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    4,
+                    (255, 255, 255),
+                    2,
+                )
 
-                    cv2.putText(
-                        img,
-                        str(frame_threshold - face_included_frames),
-                        (int(x + w / 4), int(y + h / 1.5)),
-                        cv2.FONT_HERSHEY_SIMPLEX,
-                        4,
-                        (255, 255, 255),
-                        2,
-                    )
+                detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]  # crop detected face
 
-                    detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]  # crop detected face
+                # -------------------------------------
 
-                    # -------------------------------------
+                detected_faces.append((x, y, w, h))
+                face_index = face_index + 1
 
-                    detected_faces.append((x, y, w, h))
-                    face_index = face_index + 1
-
-                    # -------------------------------------
+                # -------------------------------------
 
            if face_detected == True and face_included_frames == frame_threshold and freeze == False:
                freeze = True
diff --git a/deepface/modules/recognition.py b/deepface/modules/recognition.py
index b771e14..e8545b1 100644
--- a/deepface/modules/recognition.py
+++ b/deepface/modules/recognition.py
@@ -99,7 +99,7 @@ def find(
 
    file_name = f"representations_{model_name}.pkl"
    file_name = file_name.replace("-", "_").lower()
-    datastore_path = f"{db_path}/{file_name}"
+    datastore_path = os.path.join(db_path, file_name)
 
    df_cols = [
        "identity",
@@ -162,7 +162,7 @@ def find(
            logger.info(
                f"{len(newbies)} new representations are just added"
                f" whereas {len(oldies)} represented one(s) are just dropped"
-                f" in {db_path}/{file_name} file."
+                f" in {os.path.join(db_path, file_name)} file."
            )
 
        if not silent:
@@ -173,8 +173,8 @@
 
        if len(employees) == 0:
            raise ValueError(
-                f"There is no image in {db_path} folder!"
-                "Validate .jpg, .jpeg or .png files exist in this path.",
+                f"Could not find any valid image in {db_path} folder!"
+                " Valid images are .jpg, .jpeg or .png files.",
            )
 
        # ------------------------
@@ -196,7 +196,7 @@
            pickle.dump(representations, f)
 
        if not silent:
-            logger.info(f"Representations stored in {db_path}/{file_name} file.")
+            logger.info(f"Representations stored in {datastore_path} file.")
 
    # ----------------------------
    # now, we got representations for facial database
@@ -241,6 +241,9 @@
    distances = []
    for _, instance in df.iterrows():
        source_representation = instance[f"{model_name}_representation"]
+        if source_representation is None:
+            distances.append(float("inf"))  # no representation for this image
+            continue
 
        target_dims = len(list(target_representation))
        source_dims = len(list(source_representation))
@@ -292,7 +295,7 @@
    return resp_obj
 
 
-def __list_images(path: str) -> list:
+def __list_images(path: str) -> List[str]:
    """
    List images in a given path
    Args:
@@ -304,7 +307,7 @@
    for r, _, f in os.walk(path):
        for file in f:
            if file.lower().endswith((".jpg", ".jpeg", ".png")):
-                exact_path = f"{r}/{file}"
+                exact_path = os.path.join(r, file)
                images.append(exact_path)
    return images
 
@@ -365,31 +368,35 @@ def __find_bulk_embeddings(
                expand_percentage=expand_percentage,
            )
        except ValueError as err:
-            logger.warn(
-                f"Exception while extracting faces from {employee}: {str(err)}. Skipping it."
+            logger.error(
+                f"Exception while extracting faces from {employee}: {str(err)}"
            )
            img_objs = []
 
-        for img_obj in img_objs:
-            img_content = img_obj["face"]
-            img_region = img_obj["facial_area"]
-            embedding_obj = representation.represent(
-                img_path=img_content,
-                model_name=model_name,
-                enforce_detection=enforce_detection,
-                detector_backend="skip",
-                align=align,
-                normalization=normalization,
-            )
+        if len(img_objs) == 0:
+            logger.warn(f"No face detected in {employee}. It will be skipped during matching.")
+            representations.append((employee, None, 0, 0, 0, 0))
+        else:
+            for img_obj in img_objs:
+                img_content = img_obj["face"]
+                img_region = img_obj["facial_area"]
+                embedding_obj = representation.represent(
+                    img_path=img_content,
+                    model_name=model_name,
+                    enforce_detection=enforce_detection,
+                    detector_backend="skip",
+                    align=align,
+                    normalization=normalization,
+                )
 
-            img_representation = embedding_obj[0]["embedding"]
+                img_representation = embedding_obj[0]["embedding"]
+                representations.append((
+                    employee,
+                    img_representation,
+                    img_region["x"],
+                    img_region["y"],
+                    img_region["w"],
+                    img_region["h"]
+                ))
 
-            instance = []
-            instance.append(employee)
-            instance.append(img_representation)
-            instance.append(img_region["x"])
-            instance.append(img_region["y"])
-            instance.append(img_region["w"])
-            instance.append(img_region["h"])
-            representations.append(instance)
    return representations