Pass custom path argument while loading model

Add a `model_path` parameter to the `loadModel()` functions. This adds more
flexibility when loading models.

Also, refactor path handling to use the standard `os.path.join`, which
ensures paths are joined correctly.
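
For example, a custom weights file can now be supplied directly. A minimal usage sketch (the custom path below is hypothetical):

    from deepface.basemodels import VGGFace

    # Default: weights are resolved (and downloaded if missing)
    # under ~/.deepface/weights/
    model = VGGFace.loadModel()

    # New: load weights from a caller-supplied path (hypothetical location)
    model = VGGFace.loadModel(model_path="/opt/models/vgg_face_weights.h5")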
NISH1001 2020-05-20 15:55:08 +05:45
parent 892cbae56a
commit 2c0a507994
7 changed files with 3448 additions and 1447 deletions

.gitignore (vendored): 2 lines changed

@@ -13,4 +13,4 @@ deepface/__pycache__/*
deepface/commons/__pycache__/*
deepface/basemodels/__pycache__/*
deepface/extendedmodels/__pycache__/*
deepface/subsidiarymodels/__pycache__/*

deepface/DeepFace.py

@@ -1,5 +1,6 @@
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
import time
import os
@@ -12,330 +13,397 @@ from keras import backend as K
import keras
import tensorflow as tf

# from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
# from extendedmodels import Age, Gender, Race, Emotion
# from commons import functions, realtime, distance as dst
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst


def verify(
    img1_path,
    img2_path="",
    model_name="VGG-Face",
    distance_metric="cosine",
    model=None,
    enforce_detection=True,
):
    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    # ------------------------------
    if model == None:
        if model_name == "VGG-Face":
            print("Using VGG-Face model backend and", distance_metric, "distance.")
            model = VGGFace.loadModel()
        elif model_name == "OpenFace":
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()
        elif model_name == "Facenet":
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()
        elif model_name == "DeepFace":
            print("Using FB DeepFace model backend", distance_metric, "distance.")
            model = FbDeepFace.loadModel()
        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  # model != None
        print("Already built model is passed")

    # ------------------------------
    # face recognition models have different size of inputs
    input_shape = model.layers[0].input_shape[1:3]

    # ------------------------------

    # tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    # ------------------------------
    pbar = tqdm(range(0, len(img_list)), desc="Verification")

    resp_objects = []

    # for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            # ----------------------
            # crop and align faces

            img1 = functions.detectFace(
                img1_path, input_shape, enforce_detection=enforce_detection
            )
            img2 = functions.detectFace(
                img2_path, input_shape, enforce_detection=enforce_detection
            )

            # ----------------------
            # find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            # ----------------------
            # find distances between embeddings

            if distance_metric == "cosine":
                distance = dst.findCosineDistance(
                    img1_representation, img2_representation
                )
            elif distance_metric == "euclidean":
                distance = dst.findEuclideanDistance(
                    img1_representation, img2_representation
                )
            elif distance_metric == "euclidean_l2":
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(img1_representation),
                    dst.l2_normalize(img2_representation),
                )
            else:
                raise ValueError("Invalid distance_metric passed - ", distance_metric)

            # ----------------------
            # decision

            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            # ----------------------
            # response object

            resp_obj = "{"
            resp_obj += '"verified": ' + identified
            resp_obj += ', "distance": ' + str(distance)
            resp_obj += ', "max_threshold_to_verify": ' + str(threshold)
            resp_obj += ', "model": "' + model_name + '"'
            resp_obj += ', "similarity_metric": "' + distance_metric + '"'
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  # string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                # K.clear_session()
                return resp_obj

            # ----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ", instance)

    # -------------------------

    toc = time.time()

    # print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += '"pair_' + str(i + 1) + '": ' + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
        # return resp_objects


def analyze(img_path, actions=[], models={}, enforce_detection=True):

    if type(img_path) == list:
        img_paths = img_path.copy()
        bulkProcess = True
    else:
        img_paths = [img_path]
        bulkProcess = False

    # ---------------------------------

    # if a specific target is not passed, then find them all
    if len(actions) == 0:
        actions = ["emotion", "age", "gender", "race"]

    print("Actions to do: ", actions)

    # ---------------------------------

    if "emotion" in actions:
        if "emotion" in models:
            print("already built emotion model is passed")
            emotion_model = models["emotion"]
        else:
            emotion_model = Emotion.loadModel()

    if "age" in actions:
        if "age" in models:
            print("already built age model is passed")
            age_model = models["age"]
        else:
            age_model = Age.loadModel()

    if "gender" in actions:
        if "gender" in models:
            print("already built gender model is passed")
            gender_model = models["gender"]
        else:
            gender_model = Gender.loadModel()

    if "race" in actions:
        if "race" in models:
            print("already built race model is passed")
            race_model = models["race"]
        else:
            race_model = Race.loadModel()
    # ---------------------------------

    resp_objects = []

    global_pbar = tqdm(range(0, len(img_paths)), desc="Analyzing")

    # for img_path in img_paths:
    for j in global_pbar:
        img_path = img_paths[j]

        resp_obj = "{"

        # TO-DO: do this in parallel

        pbar = tqdm(range(0, len(actions)), desc="Finding actions")

        action_idx = 0
        img_224 = None  # Set to prevent re-detection
        # for action in actions:
        for index in pbar:
            action = actions[index]
            pbar.set_description("Action: %s" % (action))

            if action_idx > 0:
                resp_obj += ", "

            if action == "emotion":
                emotion_labels = [
                    "angry",
                    "disgust",
                    "fear",
                    "happy",
                    "sad",
                    "surprise",
                    "neutral",
                ]
                img = functions.detectFace(
                    img_path,
                    target_size=(48, 48),
                    grayscale=True,
                    enforce_detection=enforce_detection,
                )

                emotion_predictions = emotion_model.predict(img)[0, :]

                sum_of_predictions = emotion_predictions.sum()

                emotion_obj = '"emotion": {'
                for i in range(0, len(emotion_labels)):
                    emotion_label = emotion_labels[i]
                    emotion_prediction = (
                        100 * emotion_predictions[i] / sum_of_predictions
                    )

                    if i > 0:
                        emotion_obj += ", "

                    emotion_obj += '"%s": %s' % (emotion_label, emotion_prediction)

                emotion_obj += "}"

                emotion_obj += ', "dominant_emotion": "%s"' % (
                    emotion_labels[np.argmax(emotion_predictions)]
                )

                resp_obj += emotion_obj

            elif action == "age":
                if img_224 is None:
                    img_224 = functions.detectFace(
                        img_path,
                        target_size=(224, 224),
                        grayscale=False,
                        enforce_detection=enforce_detection,
                    )  # just emotion model expects grayscale images
                # print("age prediction")
                age_predictions = age_model.predict(img_224)[0, :]
                apparent_age = Age.findApparentAge(age_predictions)

                resp_obj += '"age": %s' % (apparent_age)

            elif action == "gender":
                if img_224 is None:
                    img_224 = functions.detectFace(
                        img_path,
                        target_size=(224, 224),
                        grayscale=False,
                        enforce_detection=enforce_detection,
                    )  # just emotion model expects grayscale images
                # print("gender prediction")

                gender_prediction = gender_model.predict(img_224)[0, :]

                if np.argmax(gender_prediction) == 0:
                    gender = "Woman"
                elif np.argmax(gender_prediction) == 1:
                    gender = "Man"

                resp_obj += '"gender": "%s"' % (gender)

            elif action == "race":
                if img_224 is None:
                    img_224 = functions.detectFace(
                        img_path,
                        target_size=(224, 224),
                        grayscale=False,
                        enforce_detection=enforce_detection,
                    )  # just emotion model expects grayscale images
                race_predictions = race_model.predict(img_224)[0, :]
                race_labels = [
                    "asian",
                    "indian",
                    "black",
                    "white",
                    "middle eastern",
                    "latino hispanic",
                ]

                sum_of_predictions = race_predictions.sum()

                race_obj = '"race": {'
                for i in range(0, len(race_labels)):
                    race_label = race_labels[i]
                    race_prediction = 100 * race_predictions[i] / sum_of_predictions

                    if i > 0:
                        race_obj += ", "

                    race_obj += '"%s": %s' % (race_label, race_prediction)

                race_obj += "}"
                race_obj += ', "dominant_race": "%s"' % (
                    race_labels[np.argmax(race_predictions)]
                )

                resp_obj += race_obj

            action_idx = action_idx + 1

        resp_obj += "}"

        resp_obj = json.loads(resp_obj)

        if bulkProcess == True:
            resp_objects.append(resp_obj)
        else:
            return resp_obj

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += '"instance_' + str(i + 1) + '": ' + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
        # return resp_objects


def detectFace(img_path):
    img = functions.detectFace(img_path)[0]  # detectFace returns (1, 224, 224, 3)
    return img[:, :, ::-1]  # bgr to rgb


def stream(
    db_path="",
    model_name="VGG-Face",
    distance_metric="cosine",
    enable_face_analysis=True,
):
    realtime.analysis(db_path, model_name, distance_metric, enable_face_analysis)


def allocateMemory():
    print("Analyzing your system...")
    functions.allocateMemory()


functions.initializeFolder()
# ---------------------------
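
Taken together with the `loadModel` changes in the files below, a model can be built once and passed into `verify` or `analyze`. A minimal sketch (the weights path and image paths are hypothetical):

    from deepface import DeepFace
    from deepface.basemodels import VGGFace

    # build the model once, optionally from custom weights (hypothetical path)
    model = VGGFace.loadModel(model_path="/opt/models/vgg_face_weights.h5")

    # reuse it across calls instead of rebuilding per verification
    resp = DeepFace.verify("img1.jpg", "img2.jpg", model_name="VGG-Face", model=model)
    print(resp["verified"], resp["distance"])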

File diff suppressed because it is too large.

deepface/basemodels/FbDeepFace.py

@@ -3,44 +3,71 @@ from pathlib import Path
import gdown
import keras
from keras.models import Model, Sequential
from keras.layers import (
    Convolution2D,
    LocallyConnected2D,
    MaxPooling2D,
    Flatten,
    Dense,
    Dropout,
)
import zipfile

# -------------------------------------


def get_base_model():
    base_model = Sequential()
    base_model.add(
        Convolution2D(
            32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3)
        )
    )
    base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
    base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
    base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
    base_model.add(
        LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5")
    )
    base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
    base_model.add(Flatten(name="F0"))
    base_model.add(Dense(4096, activation="relu", name="F7"))
    base_model.add(Dropout(rate=0.5, name="D0"))
    base_model.add(Dense(8631, activation="softmax", name="F8"))
    return base_model


def loadModel(model_path=""):
    # ---------------------------------
    if model_path:
        assert Path(model_path).exists()
        assert model_path.endswith(".h5")
    else:
        home = Path.home().as_posix()
        model_path = os.path.join(
            home, ".deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5"
        )
        if not os.path.isfile(model_path):
            print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")

            url = "https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
            zip_path = os.path.join(
                home, ".deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
            )
            gdown.download(url, zip_path, quiet=False)

            # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
            # note: no leading slash on the second component, or os.path.join
            # would discard `home` (see the sketch after this file)
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(os.path.join(home, ".deepface/weights/"))

    print(f"Loading model from {model_path}")
    base_model = get_base_model()
    base_model.load_weights(model_path)

    # drop F8 and D0. F7 is the representation layer.
    deepface_model = Model(
        inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output
    )
    return deepface_model
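
One `os.path.join` subtlety worth noting alongside this file (an illustrative sketch, not part of the commit): a later absolute component discards everything before it, so the weights directory must be passed without a leading slash:

    import os

    home = "/home/user"  # hypothetical home directory
    print(os.path.join(home, ".deepface/weights/"))   # /home/user/.deepface/weights/
    print(os.path.join(home, "/.deepface/weights/"))  # /.deepface/weights/ (home discarded)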

deepface/basemodels/OpenFace.py

@@ -13,238 +13,397 @@ from keras.layers.normalization import BatchNormalization
from keras.models import load_model
from keras import backend as K

# ---------------------------------------


def get_base_model():
    myInput = Input(shape=(96, 96, 3))

    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
    x = Activation("relu")(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
    x = Conv2D(64, (1, 1), name="conv2")(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
    x = Activation("relu")(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name="conv3")(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
    x = Activation("relu")(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(
        x
    )  # x is equal added
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)

    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1"
    )(inception_3a_3x3)
    inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(
        inception_3a_3x3
    )
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2"
    )(inception_3a_3x3)
    inception_3a_3x3 = Activation("relu")(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1"
    )(inception_3a_5x5)
    inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(
        inception_3a_5x5
    )
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2"
    )(inception_3a_5x5)
    inception_3a_5x5 = Activation("relu")(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(
        inception_3a_pool
    )
    inception_3a_pool = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_pool_bn"
    )(inception_3a_pool)
    inception_3a_pool = Activation("relu")(inception_3a_pool)
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
    inception_3a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3a_1x1_bn"
    )(inception_3a_1x1)
    inception_3a_1x1 = Activation("relu")(inception_3a_1x1)

    inception_3a = concatenate(
        [inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1],
        axis=3,
    )

    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1"
    )(inception_3b_3x3)
    inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(
        inception_3b_3x3
    )
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2"
    )(inception_3b_3x3)
    inception_3b_3x3 = Activation("relu")(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1"
    )(inception_3b_5x5)
    inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(
        inception_3b_5x5
    )
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2"
    )(inception_3b_5x5)
    inception_3b_5x5 = Activation("relu")(inception_3b_5x5)

    inception_3b_pool = Lambda(lambda x: x ** 2, name="power2_3b")(inception_3a)
    inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
        inception_3b_pool
    )
    inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
    inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(
        inception_3b_pool
    )
    inception_3b_pool = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_pool_bn"
    )(inception_3b_pool)
    inception_3b_pool = Activation("relu")(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
    inception_3b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3b_1x1_bn"
    )(inception_3b_1x1)
    inception_3b_1x1 = Activation("relu")(inception_3b_1x1)

    inception_3b = concatenate(
        [inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1],
        axis=3,
    )

    # Inception3c
    inception_3c_3x3 = Conv2D(
        128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1"
    )(inception_3b)
    inception_3c_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1"
    )(inception_3c_3x3)
    inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
    inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
    inception_3c_3x3 = Conv2D(
        256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2"
    )(inception_3c_3x3)
    inception_3c_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
    )(inception_3c_3x3)
    inception_3c_3x3 = Activation("relu")(inception_3c_3x3)

    inception_3c_5x5 = Conv2D(
        32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1"
    )(inception_3b)
    inception_3c_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1"
    )(inception_3c_5x5)
    inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
    inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
    inception_3c_5x5 = Conv2D(
        64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2"
    )(inception_3c_5x5)
    inception_3c_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
    )(inception_3c_5x5)
    inception_3c_5x5 = Activation("relu")(inception_3c_5x5)

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)

    inception_3c = concatenate(
        [inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3
    )

    # inception 4a
    inception_4a_3x3 = Conv2D(
        96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1"
    )(inception_3c)
    inception_4a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
    )(inception_4a_3x3)
    inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
    inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
    inception_4a_3x3 = Conv2D(
        192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2"
    )(inception_4a_3x3)
    inception_4a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
    )(inception_4a_3x3)
    inception_4a_3x3 = Activation("relu")(inception_4a_3x3)

    inception_4a_5x5 = Conv2D(
        32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1"
    )(inception_3c)
    inception_4a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1"
    )(inception_4a_5x5)
    inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
    inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
    inception_4a_5x5 = Conv2D(
        64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2"
    )(inception_4a_5x5)
    inception_4a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
    )(inception_4a_5x5)
    inception_4a_5x5 = Activation("relu")(inception_4a_5x5)

    inception_4a_pool = Lambda(lambda x: x ** 2, name="power2_4a")(inception_3c)
    inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
        inception_4a_pool
    )
    inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
    inception_4a_pool = Conv2D(
        128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + ""
    )(inception_4a_pool)
    inception_4a_pool = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
    )(inception_4a_pool)
    inception_4a_pool = Activation("relu")(inception_4a_pool)
    inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)

    inception_4a_1x1 = Conv2D(
        256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + ""
    )(inception_3c)
    inception_4a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + ""
    )(inception_4a_1x1)
    inception_4a_1x1 = Activation("relu")(inception_4a_1x1)

    inception_4a = concatenate(
        [inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1],
        axis=3,
    )

    # inception4e
    inception_4e_3x3 = Conv2D(
        160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1"
    )(inception_4a)
    inception_4e_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
    )(inception_4e_3x3)
    inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
    inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
    inception_4e_3x3 = Conv2D(
        256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2"
    )(inception_4e_3x3)
    inception_4e_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
    )(inception_4e_3x3)
    inception_4e_3x3 = Activation("relu")(inception_4e_3x3)

    inception_4e_5x5 = Conv2D(
        64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1"
    )(inception_4a)
    inception_4e_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
    )(inception_4e_5x5)
    inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
    inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
    inception_4e_5x5 = Conv2D(
        128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2"
    )(inception_4e_5x5)
    inception_4e_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
    )(inception_4e_5x5)
    inception_4e_5x5 = Activation("relu")(inception_4e_5x5)

    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)

    inception_4e = concatenate(
        [inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3
    )

    # inception5a
    inception_5a_3x3 = Conv2D(
        96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1"
    )(inception_4e)
    inception_5a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
    )(inception_5a_3x3)
    inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
    inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
    inception_5a_3x3 = Conv2D(
        384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2"
    )(inception_5a_3x3)
    inception_5a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
    )(inception_5a_3x3)
    inception_5a_3x3 = Activation("relu")(inception_5a_3x3)

    inception_5a_pool = Lambda(lambda x: x ** 2, name="power2_5a")(inception_4e)
    inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
        inception_5a_pool
    )
    inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
    inception_5a_pool = Conv2D(
        96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + ""
    )(inception_5a_pool)
    inception_5a_pool = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
    )(inception_5a_pool)
    inception_5a_pool = Activation("relu")(inception_5a_pool)
    inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)

    inception_5a_1x1 = Conv2D(
        256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + ""
    )(inception_4e)
    inception_5a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + ""
    )(inception_5a_1x1)
    inception_5a_1x1 = Activation("relu")(inception_5a_1x1)

    inception_5a = concatenate(
        [inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3
    )

    # inception_5b
    inception_5b_3x3 = Conv2D(
        96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1"
    )(inception_5a)
    inception_5b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
    )(inception_5b_3x3)
    inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
    inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
    inception_5b_3x3 = Conv2D(
        384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2"
    )(inception_5b_3x3)
    inception_5b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
    )(inception_5b_3x3)
    inception_5b_3x3 = Activation("relu")(inception_5b_3x3)

    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)

    inception_5b_pool = Conv2D(
        96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + ""
    )(inception_5b_pool)
    inception_5b_pool = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
    )(inception_5b_pool)
    inception_5b_pool = Activation("relu")(inception_5b_pool)

    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = Conv2D(
        256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + ""
    )(inception_5a)
    inception_5b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + ""
    )(inception_5b_1x1)
    inception_5b_1x1 = Activation("relu")(inception_5b_1x1)

    inception_5b = concatenate(
        [inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3
    )

    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name="dense_layer")(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(
        dense_layer
    )

    # Final Model
    return Model(inputs=[myInput], outputs=norm_layer)


def loadModel(model_path=""):
    # -----------------------------------
    if model_path:
        assert Path(model_path).exists()
        assert model_path.endswith(".h5")
    else:
        home = Path.home().as_posix()
        model_path = home + "/.deepface/weights/openface_weights.h5"
        if not os.path.isfile(model_path):
            print(f"openface_weights.h5 will be downloaded to {model_path}")
            url = "https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn"
            gdown.download(url, model_path, quiet=False)

    # -----------------------------------
    print(f"Loading model from {model_path}")
    model = get_base_model()
    model.load_weights(model_path)

    # -----------------------------------

    return model
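
The refactor separates graph construction (`get_base_model`) from weight loading (`loadModel`), so the architecture can be built and inspected without any weights on disk. A small sketch:

    from deepface.basemodels import OpenFace

    model = OpenFace.get_base_model()  # architecture only; no weights loaded
    print(model.output_shape)  # (None, 128): the L2-normalized embedding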

deepface/basemodels/VGGFace.py

@@ -1,81 +1,99 @@
import os
from pathlib import Path
from keras.models import Model, Sequential
from keras.layers import (
    Input,
    Convolution2D,
    ZeroPadding2D,
    MaxPooling2D,
    Flatten,
    Dense,
    Dropout,
    Activation,
)
import gdown

# ---------------------------------------


def get_base_model():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Convolution2D(4096, (7, 7), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation("softmax"))
    return model


def loadModel(model_path=""):
    """
    Args:
        model_path: str
            If provided, this path will be used to load the model from.
    """
    if model_path:
        assert Path(model_path).exists()
        assert model_path.endswith(".h5")
    else:
        home = Path.home().as_posix()
        model_path = os.path.join(home, ".deepface/weights/vgg_face_weights.h5")
        if not os.path.isfile(model_path):
            print(f"vgg_face_weights.h5 will be downloaded to {model_path}")
            url = "https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo"
            gdown.download(url, model_path, quiet=False)

    # -----------------------------------
    print(f"Loading model from {model_path}")
    model = get_base_model()
    model.load_weights(model_path)

    # -----------------------------------

    # TO-DO: why?
    vgg_face_descriptor = Model(
        inputs=model.layers[0].input, outputs=model.layers[-2].output
    )

    return vgg_face_descriptor
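
The new guard clauses fail fast when `model_path` is invalid. Expected behaviour, sketched (the path is hypothetical):

    from deepface.basemodels import VGGFace

    try:
        VGGFace.loadModel(model_path="/nonexistent/weights.h5")
    except AssertionError:
        print("model_path must point to an existing .h5 file")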

deepface/commons/functions.py

@ -17,46 +17,54 @@ import subprocess
import tensorflow as tf
import keras


def loadBase64Img(uri):
    encoded_data = uri.split(",")[1]
    nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img
def distance(a, b):
    x1 = a[0]
    y1 = a[1]
    x2 = b[0]
    y2 = b[1]

    return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
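
# Sanity check (3-4-5 triangle): distance((0, 0), (3, 4)) -> 5.0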
def findFileHash(file):
    BLOCK_SIZE = 65536  # The size of each read from the file

    file_hash = (
        hashlib.sha256()
    )  # Create the hash object, can use something other than `.sha256()` if you wish
    with open(file, "rb") as f:  # Open the file to read its bytes
        fb = f.read(BLOCK_SIZE)  # Read from the file. Take in the amount declared above
        while len(fb) > 0:  # While there is still data being read from the file
            file_hash.update(fb)  # Update the hash
            fb = f.read(BLOCK_SIZE)  # Read the next block from the file

    return file_hash.hexdigest()
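
# Usage sketch (hypothetical file location; the expected digest is the
# vgg_face_weights.h5 entry from the weight_hashes table in initializeFolder):
#
#   expected = "759266b9614d0fd5d65b97bf716818b746cc77ab5944c7bffc937c6ba9455d8c"
#   if findFileHash("vgg_face_weights.h5") != expected:
#       print("hash mismatch - the weight file is corrupted or incomplete")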
def initializeFolder():

    home = str(Path.home())

    if not os.path.exists(home + "/.deepface"):
        os.mkdir(home + "/.deepface")
        print("Directory ", home, "/.deepface created")

    if not os.path.exists(home + "/.deepface/weights"):
        os.mkdir(home + "/.deepface/weights")
        print("Directory ", home, "/.deepface/weights created")

    # ----------------------------------
""" """
#avoid interrupted file download #avoid interrupted file download
weight_hashes = [ weight_hashes = [
['age_model_weights.h5', '0aeff75734bfe794113756d2bfd0ac823d51e9422c8961125b570871d3c2b114'] ['age_model_weights.h5', '0aeff75734bfe794113756d2bfd0ac823d51e9422c8961125b570871d3c2b114']
, ['facenet_weights.h5', '90659cc97bfda5999120f95d8e122f4d262cca11715a21e59ba024bcce816d5c'] , ['facenet_weights.h5', '90659cc97bfda5999120f95d8e122f4d262cca11715a21e59ba024bcce816d5c']
@@ -66,12 +74,12 @@ def initializeFolder():
        , ['race_model_single_batch.h5', 'eb22b28b1f6dfce65b64040af4e86003a5edccb169a1a338470dde270b6f5e54']
        , ['vgg_face_weights.h5', '759266b9614d0fd5d65b97bf716818b746cc77ab5944c7bffc937c6ba9455d8c']
    ]

    for i in weight_hashes:
        weight_file = home+"/.deepface/weights/"+i[0]
        expected_hash = i[1]

        #check file exists
        if os.path.isfile(weight_file) == True:
            current_hash = findFileHash(weight_file)
@@ -79,309 +87,346 @@ def initializeFolder():
print("hash violated for ", i[0],". It's going to be removed.") print("hash violated for ", i[0],". It's going to be removed.")
os.remove(weight_file) os.remove(weight_file)
""" """
#---------------------------------- # ----------------------------------
def findThreshold(model_name, distance_metric):

    threshold = 0.40

    if model_name == "VGG-Face":
        if distance_metric == "cosine":
            threshold = 0.40
        elif distance_metric == "euclidean":
            threshold = 0.55
        elif distance_metric == "euclidean_l2":
            threshold = 0.75

    elif model_name == "OpenFace":
        if distance_metric == "cosine":
            threshold = 0.10
        elif distance_metric == "euclidean":
            threshold = 0.55
        elif distance_metric == "euclidean_l2":
            threshold = 0.55

    elif model_name == "Facenet":
        if distance_metric == "cosine":
            threshold = 0.40
        elif distance_metric == "euclidean":
            threshold = 10
        elif distance_metric == "euclidean_l2":
            threshold = 0.80

    elif model_name == "DeepFace":
        if distance_metric == "cosine":
            threshold = 0.23
        elif distance_metric == "euclidean":
            threshold = 64
        elif distance_metric == "euclidean_l2":
            threshold = 0.64

    return threshold
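
# Usage sketch: a threshold turns a raw distance into a same-person decision,
# mirroring how verify() consumes it (`d` is assumed to be computed elsewhere):
#
#   threshold = findThreshold("VGG-Face", "cosine")
#   identified = "true" if d <= threshold else "false"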
def get_opencv_path():
    opencv_home = cv2.__file__
    folders = opencv_home.split(os.path.sep)[0:-1]

    path = folders[0]
    for folder in folders[1:]:
        path = path + "/" + folder

    face_detector_path = path + "/data/haarcascade_frontalface_default.xml"
    eye_detector_path = path + "/data/haarcascade_eye.xml"

    if os.path.isfile(face_detector_path) != True:
        raise ValueError(
            "Confirm that opencv is installed on your environment! Expected path ",
            face_detector_path,
            " violated.",
        )

    return path + "/data/"
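
# Illustrative result (the actual prefix depends on the installation):
#   get_opencv_path()  ->  ".../site-packages/cv2/data/"
# Recent opencv-python wheels also expose this directory as cv2.data.haarcascades.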
def detectFace(img, target_size=(224, 224), grayscale=False, enforce_detection=True):

    img_path = ""

    # -----------------------

    exact_image = False
    if type(img).__module__ == np.__name__:
        exact_image = True

    base64_img = False
    if len(img) > 11 and img[0:11] == "data:image/":
        base64_img = True

    # -----------------------

    opencv_path = get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    eye_detector_path = opencv_path + "haarcascade_eye.xml"

    if os.path.isfile(face_detector_path) != True:
        raise ValueError(
            "Confirm that opencv is installed on your environment! Expected path ",
            face_detector_path,
            " violated.",
        )

    # --------------------------------

    face_detector = cv2.CascadeClassifier(face_detector_path)
    eye_detector = cv2.CascadeClassifier(eye_detector_path)

    if base64_img == True:
        img = loadBase64Img(img)
    elif exact_image != True:  # image path passed as input
        if os.path.isfile(img) != True:
            raise ValueError("Confirm that ", img, " exists")

        img = cv2.imread(img)

    img_raw = img.copy()

    # --------------------------------

    faces = face_detector.detectMultiScale(img, 1.3, 5)

    # print("found faces in ",image_path," is ",len(faces))

    if len(faces) > 0:
        x, y, w, h = faces[0]
        detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
        detected_face_gray = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)

        # ---------------------------
        # face alignment

        eyes = eye_detector.detectMultiScale(detected_face_gray)

        if len(eyes) >= 2:
            # find the two largest eyes
            base_eyes = eyes[:, 2]

            items = []
            for i in range(0, len(base_eyes)):
                item = (base_eyes[i], i)
                items.append(item)

            df = pd.DataFrame(items, columns=["length", "idx"]).sort_values(
                by=["length"], ascending=False
            )

            eyes = eyes[df.idx.values[0:2]]

            # -----------------------
            # decide left and right eye

            eye_1 = eyes[0]
            eye_2 = eyes[1]

            if eye_1[0] < eye_2[0]:
                left_eye = eye_1
                right_eye = eye_2
            else:
                left_eye = eye_2
                right_eye = eye_1

            # -----------------------
            # find center of eyes

            left_eye_center = (
                int(left_eye[0] + (left_eye[2] / 2)),
                int(left_eye[1] + (left_eye[3] / 2)),
            )
            left_eye_x = left_eye_center[0]
            left_eye_y = left_eye_center[1]

            right_eye_center = (
                int(right_eye[0] + (right_eye[2] / 2)),
                int(right_eye[1] + (right_eye[3] / 2)),
            )
            right_eye_x = right_eye_center[0]
            right_eye_y = right_eye_center[1]

            # -----------------------
            # find rotation direction

            if left_eye_y > right_eye_y:
                point_3rd = (right_eye_x, left_eye_y)
                direction = -1  # rotate clockwise
            else:
                point_3rd = (left_eye_x, right_eye_y)
                direction = 1  # rotate counter-clockwise

            # -----------------------
            # find length of triangle edges

            a = distance(left_eye_center, point_3rd)
            b = distance(right_eye_center, point_3rd)
            c = distance(right_eye_center, left_eye_center)

            # -----------------------
            # apply cosine rule

            cos_a = (b * b + c * c - a * a) / (2 * b * c)
            angle = np.arccos(cos_a)  # angle in radian
            angle = (angle * 180) / math.pi  # radian to degree
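
            # Worked example with illustrative numbers: eye centers (30, 40) and
            # (70, 48) give a = 8, b = 40, c = sqrt(40^2 + 8^2) ~ 40.79, so
            # cos_a = (40*40 + 40.79*40.79 - 8*8) / (2 * 40 * 40.79) ~ 0.9806
            # and the face is rotated by arccos(0.9806) ~ 11.31 degrees.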
            # -----------------------
            # rotate base image

            if direction == -1:
                angle = 90 - angle

            img = Image.fromarray(img_raw)
            img = np.array(img.rotate(direction * angle))

            # rotating recovers the base image, so the face detection is lost. apply it again.
            faces = face_detector.detectMultiScale(img, 1.3, 5)
            if len(faces) > 0:
                x, y, w, h = faces[0]
                detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]

            # -----------------------
            # face alignment block end
        # ---------------------------

        # face alignment needs color images; that's why the grayscale conversion is done here.
        if grayscale == True:
            detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)

        detected_face = cv2.resize(detected_face, target_size)

        img_pixels = image.img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis=0)

        # normalize input in [0, 1]
        img_pixels /= 255

        return img_pixels

    else:
        if (exact_image == True) or (enforce_detection != True):
            if grayscale == True:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, target_size)
            img_pixels = image.img_to_array(img)
            img_pixels = np.expand_dims(img_pixels, axis=0)
            img_pixels /= 255
            return img_pixels
        else:
            raise ValueError(
                "Face could not be detected. Please confirm that the picture is a face photo, or consider setting the enforce_detection param to False."
            )
def allocateMemory():

    # find allocated memories
    gpu_indexes = []
    memory_usage_percentages = []
    available_memories = []
    total_memories = []
    utilizations = []
    power_usages = []
    power_capacities = []

    try:
        result = subprocess.check_output(["nvidia-smi"])

        dashboard = result.decode("utf-8").split("=|")

        dashboard = dashboard[1].split("\n")

        gpu_idx = 0
        for line in dashboard:
            if "MiB" in line:
                power_info = line.split("|")[1]
                power_capacity = int(power_info.split("/")[-1].replace("W", ""))
                power_usage = int(
                    (power_info.split("/")[-2]).strip().split(" ")[-1].replace("W", "")
                )

                power_usages.append(power_usage)
                power_capacities.append(power_capacity)

                # ----------------------------

                memory_info = line.split("|")[2].replace("MiB", "").split("/")
                utilization_info = int(line.split("|")[3].split("%")[0])

                allocated = int(memory_info[0])
                total_memory = int(memory_info[1])
                available_memory = total_memory - allocated

                total_memories.append(total_memory)
                available_memories.append(available_memory)
                memory_usage_percentages.append(
                    round(100 * int(allocated) / int(total_memory), 4)
                )
                utilizations.append(utilization_info)
                gpu_indexes.append(gpu_idx)

                gpu_idx = gpu_idx + 1

        gpu_count = gpu_idx * 1

    except Exception as err:
        gpu_count = 0
        # print(str(err))

    # ------------------------------

    df = pd.DataFrame(gpu_indexes, columns=["gpu_index"])
    df["total_memories_in_mb"] = total_memories
    df["available_memories_in_mb"] = available_memories
    df["memory_usage_percentage"] = memory_usage_percentages
    df["utilizations"] = utilizations
    df["power_usages_in_watts"] = power_usages
    df["power_capacities_in_watts"] = power_capacities

    df = df.sort_values(by=["available_memories_in_mb"], ascending=False).reset_index(
        drop=True
    )

    # ------------------------------

    required_memory = 10000  # All deepface models require 9016 MiB

    if df.shape[0] > 0:  # has gpu
        if df.iloc[0].available_memories_in_mb > required_memory:
            my_gpu = str(int(df.iloc[0].gpu_index))
            os.environ["CUDA_VISIBLE_DEVICES"] = my_gpu

            # ------------------------------
            # tf allocates all memory by default
            # this block avoids the greedy approach

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            session = tf.Session(config=config)
            keras.backend.set_session(session)

            print("DeepFace will run on GPU (gpu_", my_gpu, ")")
        else:
            # the system has a gpu but not enough free memory to allocate
            os.environ["CUDA_VISIBLE_DEVICES"] = ""  # run it on cpu
            print(
                "Even though the system has GPUs, there is not enough free memory to allocate."
            )
            print("DeepFace will run on CPU")
    else:
        print("DeepFace will run on CPU")

    # ------------------------------
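
# Usage sketch: call allocateMemory() once, before any model is loaded, so that
# CUDA_VISIBLE_DEVICES takes effect; the GPU with the most available memory is
# selected, otherwise execution falls back to CPU. Note that tf.ConfigProto and
# tf.Session are TensorFlow 1.x APIs.
#
#   from deepface.commons import functions
#   functions.allocateMemory()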