[fix] single-image input for the Emotion model

This commit is contained in:
NatLee 2025-01-07 05:56:09 +08:00
parent 52a38ba21a
commit ba8c651c7a

View File

@ -72,14 +72,16 @@ class EmotionClient(Demography):
# Preprocessing input image or image list.
imgs = self._preprocess_batch_or_single_input(img)
# Preprocess each image and add channel dimension for grayscale images
if imgs.shape[0] == 1:
# Preprocess single image and add channel dimension for grayscale images
processed_imgs = np.expand_dims(np.array([self._preprocess_image(img) for img in imgs]), axis=0)
else:
# Preprocess batch of images and add channel dimension for grayscale images
processed_imgs = np.expand_dims(np.array([self._preprocess_image(img) for img in imgs]), axis=-1)
# Reshape input for model (expected shape=(n, 48, 48, 1)), where n is the batch size
processed_imgs = processed_imgs.reshape(processed_imgs.shape[0], 48, 48, 1)
# Prediction
# Batched input passed to the model is (n, 48, 48, 1) after the reshape above, where n is the batch size
predictions = self._predict_internal(processed_imgs)
return predictions