Merge pull request #1031 from AndreaLanfranchi/al20240221

Small knob polishing; avoid duplicated attempts to add undetectable images to the pickle file
This commit is contained in:
Sefik Ilkin Serengil 2024-02-22 12:19:07 +00:00 committed by GitHub
commit 14bbc2f938
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 63 additions and 61 deletions

View File

@ -210,6 +210,5 @@ def align_face(
return img, 0 return img, 0
angle = float(np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))) angle = float(np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])))
img = Image.fromarray(img) img = np.array(Image.fromarray(img).rotate(angle))
img = np.array(img.rotate(angle))
return img, angle return img, angle

View File

@ -40,11 +40,10 @@ def build_model(model_name: str) -> Any:
if not "model_obj" in globals(): if not "model_obj" in globals():
model_obj = {} model_obj = {}
if not model_name in model_obj: if not model_name in model_obj.keys():
model = models.get(model_name) model = models.get(model_name)
if model: if model:
model = model() model_obj[model_name] = model()
model_obj[model_name] = model
else: else:
raise ValueError(f"Invalid model_name passed - {model_name}") raise ValueError(f"Invalid model_name passed - {model_name}")

View File

@ -68,9 +68,8 @@ def analysis(
cap = cv2.VideoCapture(source) # webcam cap = cv2.VideoCapture(source) # webcam
while True: while True:
_, img = cap.read() has_frame, img = cap.read()
if not has_frame:
if img is None:
break break
# cv2.namedWindow('img', cv2.WINDOW_FREERATIO) # cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
@ -92,6 +91,8 @@ def analysis(
faces = [] faces = []
for face_obj in face_objs: for face_obj in face_objs:
facial_area = face_obj["facial_area"] facial_area = face_obj["facial_area"]
if facial_area["w"] <= 130: # discard small detected faces
continue
faces.append( faces.append(
( (
facial_area["x"], facial_area["x"],
@ -111,36 +112,32 @@ def analysis(
detected_faces = [] detected_faces = []
face_index = 0 face_index = 0
for x, y, w, h in faces: for x, y, w, h in faces:
if w > 130: # discard small detected faces face_detected = True
if face_index == 0:
face_included_frames += 1 # increase frame for a single face
face_detected = True cv2.rectangle(
if face_index == 0: img, (x, y), (x + w, y + h), (67, 67, 67), 1
face_included_frames = ( ) # draw rectangle to main image
face_included_frames + 1
) # increase frame for a single face
cv2.rectangle( cv2.putText(
img, (x, y), (x + w, y + h), (67, 67, 67), 1 img,
) # draw rectangle to main image str(frame_threshold - face_included_frames),
(int(x + w / 4), int(y + h / 1.5)),
cv2.FONT_HERSHEY_SIMPLEX,
4,
(255, 255, 255),
2,
)
cv2.putText( detected_face = img[int(y) : int(y + h), int(x) : int(x + w)] # crop detected face
img,
str(frame_threshold - face_included_frames),
(int(x + w / 4), int(y + h / 1.5)),
cv2.FONT_HERSHEY_SIMPLEX,
4,
(255, 255, 255),
2,
)
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)] # crop detected face # -------------------------------------
# ------------------------------------- detected_faces.append((x, y, w, h))
face_index = face_index + 1
detected_faces.append((x, y, w, h)) # -------------------------------------
face_index = face_index + 1
# -------------------------------------
if face_detected == True and face_included_frames == frame_threshold and freeze == False: if face_detected == True and face_included_frames == frame_threshold and freeze == False:
freeze = True freeze = True

View File

@ -99,7 +99,7 @@ def find(
file_name = f"representations_{model_name}.pkl" file_name = f"representations_{model_name}.pkl"
file_name = file_name.replace("-", "_").lower() file_name = file_name.replace("-", "_").lower()
datastore_path = f"{db_path}/{file_name}" datastore_path = os.path.join(db_path, file_name)
df_cols = [ df_cols = [
"identity", "identity",
@ -162,7 +162,7 @@ def find(
logger.info( logger.info(
f"{len(newbies)} new representations are just added" f"{len(newbies)} new representations are just added"
f" whereas {len(oldies)} represented one(s) are just dropped" f" whereas {len(oldies)} represented one(s) are just dropped"
f" in {db_path}/{file_name} file." f" in {os.path.join(db_path,file_name)} file."
) )
if not silent: if not silent:
@ -173,8 +173,8 @@ def find(
if len(employees) == 0: if len(employees) == 0:
raise ValueError( raise ValueError(
f"There is no image in {db_path} folder!" f"Could not find any valid image in {db_path} folder!"
"Validate .jpg, .jpeg or .png files exist in this path.", "Valid images are .jpg, .jpeg or .png files.",
) )
# ------------------------ # ------------------------
@ -196,7 +196,7 @@ def find(
pickle.dump(representations, f) pickle.dump(representations, f)
if not silent: if not silent:
logger.info(f"Representations stored in {db_path}/{file_name} file.") logger.info(f"Representations stored in {datastore_path} file.")
# ---------------------------- # ----------------------------
# now, we got representations for facial database # now, we got representations for facial database
@ -241,6 +241,9 @@ def find(
distances = [] distances = []
for _, instance in df.iterrows(): for _, instance in df.iterrows():
source_representation = instance[f"{model_name}_representation"] source_representation = instance[f"{model_name}_representation"]
if source_representation is None:
distances.append(float("inf")) # no representation for this image
continue
target_dims = len(list(target_representation)) target_dims = len(list(target_representation))
source_dims = len(list(source_representation)) source_dims = len(list(source_representation))
@ -292,7 +295,7 @@ def find(
return resp_obj return resp_obj
def __list_images(path: str) -> list: def __list_images(path: str) -> List[str]:
""" """
List images in a given path List images in a given path
Args: Args:
@ -304,7 +307,7 @@ def __list_images(path: str) -> list:
for r, _, f in os.walk(path): for r, _, f in os.walk(path):
for file in f: for file in f:
if file.lower().endswith((".jpg", ".jpeg", ".png")): if file.lower().endswith((".jpg", ".jpeg", ".png")):
exact_path = f"{r}/{file}" exact_path = os.path.join(r, file)
images.append(exact_path) images.append(exact_path)
return images return images
@ -365,31 +368,35 @@ def __find_bulk_embeddings(
expand_percentage=expand_percentage, expand_percentage=expand_percentage,
) )
except ValueError as err: except ValueError as err:
logger.warn( logger.error(
f"Exception while extracting faces from {employee}: {str(err)}. Skipping it." f"Exception while extracting faces from {employee}: {str(err)}"
) )
img_objs = [] img_objs = []
for img_obj in img_objs: if len(img_objs) == 0:
img_content = img_obj["face"] logger.warn(f"No face detected in {employee}. It will be skipped in detection.")
img_region = img_obj["facial_area"] representations.append((employee, None, 0, 0, 0, 0))
embedding_obj = representation.represent( else:
img_path=img_content, for img_obj in img_objs:
model_name=model_name, img_content = img_obj["face"]
enforce_detection=enforce_detection, img_region = img_obj["facial_area"]
detector_backend="skip", embedding_obj = representation.represent(
align=align, img_path=img_content,
normalization=normalization, model_name=model_name,
) enforce_detection=enforce_detection,
detector_backend="skip",
align=align,
normalization=normalization,
)
img_representation = embedding_obj[0]["embedding"] img_representation = embedding_obj[0]["embedding"]
representations.append((
employee,
img_representation,
img_region["x"],
img_region["y"],
img_region["w"],
img_region["h"]
))
instance = []
instance.append(employee)
instance.append(img_representation)
instance.append(img_region["x"])
instance.append(img_region["y"])
instance.append(img_region["w"])
instance.append(img_region["h"])
representations.append(instance)
return representations return representations