Mirror of https://github.com/serengil/deepface.git, synced 2025-06-08 20:45:22 +00:00
alignment fix for emotion
commit 905e1b082b
parent ec3f64dbd8

The diff below switches the DeepFace module imports between the in-repo and installed-package layouts, adds an existence check for the image path in analyze(), and moves the grayscale conversion in detectFace() to after the face alignment block, because alignment needs the color image.
@@ -9,13 +9,13 @@ import pandas as pd
 from tqdm import tqdm
 import json
 
-#from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
-#from extendedmodels import Age, Gender, Race, Emotion
-#from commons import functions, distance as dst
+from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
+from extendedmodels import Age, Gender, Race, Emotion
+from commons import functions, distance as dst
 
-from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
-from deepface.extendedmodels import Age, Gender, Race, Emotion
-from deepface.commons import functions, distance as dst
+#from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
+#from deepface.extendedmodels import Age, Gender, Race, Emotion
+#from deepface.commons import functions, distance as dst
 
 def verify(img1_path, img2_path
 	, model_name ='VGG-Face', distance_metric = 'cosine', plot = False):
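The hunk above (apparently in the top-level DeepFace module) switches between the installed-package import style (from deepface.basemodels ...) and the in-repo style (from basemodels ...) by toggling comments. Purely as an illustration, and not part of this commit, the same effect could be had with an import fallback so neither block needs to be commented out:

# Illustrative sketch only (not from this commit): try the installed-package
# layout first, then fall back to the in-repo layout when running from source.
try:
	from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
	from deepface.extendedmodels import Age, Gender, Race, Emotion
	from deepface.commons import functions, distance as dst
except ImportError:
	from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
	from extendedmodels import Age, Gender, Race, Emotion
	from commons import functions, distance as dst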
@@ -129,6 +129,9 @@ def verify(img1_path, img2_path
 
 def analyze(img_path, actions= []):
 	
+	if os.path.isfile(img_path) != True:
+		raise ValueError("Confirm that ",img_path," exists")
+	
 	resp_obj = "{"
 	
 	#if a specific target is not passed, then find them all
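The added guard makes analyze() fail fast with a ValueError when the image path does not exist, before any model work starts. Note that ValueError receives three separate arguments here, so the error message renders as a tuple; a single formatted string, as in the standalone sketch below (not from the commit), reads more cleanly:

# Standalone sketch of the same guard with one formatted message.
import os

def check_image_exists(img_path):
	if not os.path.isfile(img_path):
		raise ValueError("Confirm that %s exists" % img_path)

# check_image_exists("missing.jpg") would raise:
# ValueError: Confirm that missing.jpg exists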
@@ -128,10 +128,7 @@ def detectFace(image_path, target_size=(224, 224), grayscale = False):
 	face_detector = cv2.CascadeClassifier(face_detector_path)
 	eye_detector = cv2.CascadeClassifier(eye_detector_path)
 	
-	if grayscale != True:
-		img = cv2.imread(image_path)
-	else: #gray scale
-		img = cv2.imread(image_path, 0)
+	img = cv2.imread(image_path)
 	
 	img_raw = img.copy()
 	
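This hunk, inside detectFace (presumably in the commons module), drops the grayscale branch at read time: cv2.imread(image_path) with no flag loads a 3-channel BGR image, while the removed cv2.imread(image_path, 0) call loaded a single-channel grayscale image (0 is cv2.IMREAD_GRAYSCALE). Always reading in color keeps the channels that the eye-based alignment needs. A small sketch of the two flags, assuming a local sample.jpg exists:

# Sketch of the two imread modes involved (assumes a local sample.jpg).
import cv2

img_color = cv2.imread("sample.jpg")                        # default IMREAD_COLOR: shape (h, w, 3), BGR
img_gray = cv2.imread("sample.jpg", cv2.IMREAD_GRAYSCALE)   # same as passing 0: shape (h, w)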
@@ -229,6 +226,10 @@ def detectFace(image_path, target_size=(224, 224), grayscale = False):
 	#face alignment block end
 	#---------------------------
 	
+	#face alignment block needs colorful images. that's why, converting to gray scale logic moved to here.
+	if grayscale == True:
+		detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
+	
 	detected_face = cv2.resize(detected_face, target_size)
 	
 	img_pixels = image.img_to_array(detected_face)
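With this hunk the grayscale conversion happens only after the alignment block, matching the removal in the previous hunk: the image is read in color, detection and alignment run on the color image, and the conversion to gray happens just before the resize. A condensed sketch of the new ordering (not the library's exact code; detection and alignment are stubbed out, and the 48x48 grayscale target is only an example of what an emotion model typically expects):

# Condensed sketch of the reordered pipeline in detectFace.
import cv2

def load_face(image_path, target_size=(48, 48), grayscale=True):
	img = cv2.imread(image_path)       # always read as 3-channel BGR
	detected_face = img                # stand-in for the detection + alignment steps,
	                                   # which operate on the color image
	if grayscale:
		detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
	return cv2.resize(detected_face, target_size)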