https://github.com/serengil/deepface.git
real time implementation

commit b1245e7055
parent e44e418eba
deepface/DeepFace.py

@@ -584,11 +584,20 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
 
 	return None
 
-def stream(db_path = '', model_name ='VGG-Face', distance_metric = 'cosine', enable_face_analysis = True):
+def stream(db_path = '', model_name ='VGG-Face', distance_metric = 'cosine', enable_face_analysis = True
+		, source = 0, time_threshold = 5, frame_threshold = 5):
+
+	if time_threshold < 1:
+		raise ValueError("time_threshold must be greater than the value 1 but you passed "+str(time_threshold))
+
+	if frame_threshold < 1:
+		raise ValueError("frame_threshold must be greater than the value 1 but you passed "+str(frame_threshold))
 
 	functions.initialize_detector(detector_backend = 'opencv')
 
-	realtime.analysis(db_path, model_name, distance_metric, enable_face_analysis)
+	realtime.analysis(db_path, model_name, distance_metric, enable_face_analysis
+		, source = source, time_threshold = time_threshold, frame_threshold = frame_threshold)
 
 def detectFace(img_path, detector_backend = 'mtcnn'):
 
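With this change, stream() exposes the capture source and both thresholds to the caller instead of hard-coding them. A minimal usage sketch (the "my_db" folder and the video file name are illustrative placeholders, not part of this commit):

from deepface import DeepFace

# watch the default webcam (source = 0); analyze a face once it has appeared
# in 5 consecutive frames and keep the result on screen for 5 seconds
DeepFace.stream(db_path = "my_db", model_name = "VGG-Face", distance_metric = "cosine",
	enable_face_analysis = True, source = 0, time_threshold = 5, frame_threshold = 5)

# source is forwarded to cv2.VideoCapture unchanged, so a video file path
# should work as well:
# DeepFace.stream(db_path = "my_db", source = "meeting.mp4")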
deepface/commons/realtime.py

@@ -9,14 +9,14 @@ import re
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 
-from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID
-from deepface.extendedmodels import Age, Gender, Race, Emotion
+from deepface import DeepFace
+from deepface.extendedmodels import Age
 from deepface.commons import functions, realtime, distance as dst
 
-def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
+def analysis(db_path, model_name, distance_metric, enable_face_analysis = True
+		, source = 0, time_threshold = 5, frame_threshold = 5):
 
-	input_shape = (224, 224)
-	input_shape_x = input_shape[0]; input_shape_y = input_shape[1]
+	input_shape = (224, 224); input_shape_x = input_shape[0]; input_shape_y = input_shape[1]
 
 	text_color = (255,255,255)
 
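Since stream() only validates the thresholds and then delegates, the new keyword arguments can also be passed to realtime.analysis directly. A rough sketch assuming a hypothetical "my_db" folder of face images:

from deepface.commons import functions, realtime

functions.initialize_detector(detector_backend = 'opencv')  # stream() does this before delegating
realtime.analysis("my_db", model_name = "VGG-Face", distance_metric = "cosine",
	enable_face_analysis = True, source = 0, time_threshold = 5, frame_threshold = 5)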
@@ -37,41 +37,13 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 	#------------------------
 
 	if len(employees) > 0:
-		if model_name == 'VGG-Face':
-			print("Using VGG-Face model backend and", distance_metric,"distance.")
-			model = VGGFace.loadModel()
-			input_shape = (224, 224)
-
-		elif model_name == 'OpenFace':
-			print("Using OpenFace model backend", distance_metric,"distance.")
-			model = OpenFace.loadModel()
-			input_shape = (96, 96)
-
-		elif model_name == 'Facenet':
-			print("Using Facenet model backend", distance_metric,"distance.")
-			model = Facenet.loadModel()
-			input_shape = (160, 160)
-
-		elif model_name == 'DeepFace':
-			print("Using FB DeepFace model backend", distance_metric,"distance.")
-			model = FbDeepFace.loadModel()
-			input_shape = (152, 152)
-
-		elif model_name == 'DeepID':
-			print("Using DeepID model backend", distance_metric,"distance.")
-			model = DeepID.loadModel()
-			input_shape = (55, 47)
-
-		elif model_name == 'Dlib':
-			print("Using Dlib model backend", distance_metric,"distance.")
-			from deepface.basemodels.DlibResNet import DlibResNet
-			model = DlibResNet()
-			input_shape = (150, 150)
-
-		else:
-			raise ValueError("Invalid model_name passed - ", model_name)
+		model = DeepFace.build_model(model_name)
+		print(model_name," is built")
+
 	#------------------------
 
+	input_shape = functions.find_input_shape(model)
 	input_shape_x = input_shape[0]
 	input_shape_y = input_shape[1]
 
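The per-model if/elif ladder is replaced by the DeepFace.build_model factory, with the expected input size read back from the built model via functions.find_input_shape. The same pattern outside analysis(), as a sketch:

from deepface import DeepFace
from deepface.commons import functions

model = DeepFace.build_model("Facenet")           # any supported model_name
input_shape = functions.find_input_shape(model)   # (160, 160) for Facenet, per the removed branch above
input_shape_x = input_shape[0]
input_shape_y = input_shape[1]
print("Facenet is built, expects", input_shape_x, "x", input_shape_y, "inputs")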
@@ -85,13 +57,13 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 
 		tic = time.time()
 
-		emotion_model = Emotion.loadModel()
+		emotion_model = DeepFace.build_model('Emotion')
 		print("Emotion model loaded")
 
-		age_model = Age.loadModel()
+		age_model = DeepFace.build_model('Age')
 		print("Age model loaded")
 
-		gender_model = Gender.loadModel()
+		gender_model = DeepFace.build_model('Gender')
 		print("Gender model loaded")
 
 		toc = time.time()
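The facial-attribute models go through the same factory call, and the surrounding tic/toc pair measures how long the three builds take, roughly as below (the print wording is illustrative):

import time
from deepface import DeepFace

tic = time.time()
emotion_model = DeepFace.build_model('Emotion')
age_model = DeepFace.build_model('Age')
gender_model = DeepFace.build_model('Gender')
toc = time.time()
print("Facial attribute models built in", round(toc - tic, 2), "seconds")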
@@ -128,7 +100,6 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 
 	#-----------------------
 
-	time_threshold = 5; frame_threshold = 5
 	pivot_img_size = 112 #face recognition result image
 
 	#-----------------------
@@ -145,8 +116,7 @@ def analysis(db_path, model_name, distance_metric, enable_face_analysis = True):
 	freezed_frame = 0
 	tic = time.time()
 
-	cap = cv2.VideoCapture(0) #webcam
-	#cap = cv2.VideoCapture("C:/Users/IS96273/Desktop/skype-video-1.mp4") #video
+	cap = cv2.VideoCapture(source) #webcam
 
 	while(True):
 		ret, img = cap.read()
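With the hard-coded index replaced by source, the capture loop can read a webcam or a recorded video without editing the module. A self-contained sketch of the same read loop (the file name is a placeholder):

import cv2

source = 0                                   # webcam index, the default
#source = "skype-video-1.mp4"                # or a video file path

cap = cv2.VideoCapture(source)

while True:
	ret, img = cap.read()
	if not ret:                              # end of stream or camera error
		break
	cv2.imshow("stream", img)
	if cv2.waitKey(1) & 0xFF == ord('q'):    # press q to quit
		break

cap.release()
cv2.destroyAllWindows()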