Pass custom path argument while loading model

Add a `model_path` parameter to the `loadModel()` function. This adds more
flexibility when loading the models.

Also, refactor the code to use the standard `os.path.join`, which ensures
that paths are joined correctly.
This commit is contained in:
NISH1001 2020-05-20 15:55:08 +05:45
parent 892cbae56a
commit 2c0a507994
7 changed files with 3448 additions and 1447 deletions

View File

@ -1,5 +1,6 @@
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
import time
import os
@ -20,8 +21,15 @@ from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst
def verify(img1_path, img2_path=''
, model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = True):
def verify(
img1_path,
img2_path="",
model_name="VGG-Face",
distance_metric="cosine",
model=None,
enforce_detection=True,
):
tic = time.time()
@ -35,19 +43,19 @@ def verify(img1_path, img2_path=''
# ------------------------------
if model == None:
if model_name == 'VGG-Face':
if model_name == "VGG-Face":
print("Using VGG-Face model backend and", distance_metric, "distance.")
model = VGGFace.loadModel()
elif model_name == 'OpenFace':
elif model_name == "OpenFace":
print("Using OpenFace model backend", distance_metric, "distance.")
model = OpenFace.loadModel()
elif model_name == 'Facenet':
elif model_name == "Facenet":
print("Using Facenet model backend", distance_metric, "distance.")
model = Facenet.loadModel()
elif model_name == 'DeepFace':
elif model_name == "DeepFace":
print("Using FB DeepFace model backend", distance_metric, "distance.")
model = FbDeepFace.loadModel()
@ -66,7 +74,7 @@ def verify(img1_path, img2_path=''
threshold = functions.findThreshold(model_name, distance_metric)
# ------------------------------
pbar = tqdm(range(0,len(img_list)), desc='Verification')
pbar = tqdm(range(0, len(img_list)), desc="Verification")
resp_objects = []
@ -82,8 +90,12 @@ def verify(img1_path, img2_path=''
# ----------------------
# crop and align faces
img1 = functions.detectFace(img1_path, input_shape, enforce_detection = enforce_detection)
img2 = functions.detectFace(img2_path, input_shape, enforce_detection = enforce_detection)
img1 = functions.detectFace(
img1_path, input_shape, enforce_detection=enforce_detection
)
img2 = functions.detectFace(
img2_path, input_shape, enforce_detection=enforce_detection
)
# ----------------------
# find embeddings
@ -94,12 +106,19 @@ def verify(img1_path, img2_path=''
# ----------------------
# find distances between embeddings
if distance_metric == 'cosine':
distance = dst.findCosineDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean':
distance = dst.findEuclideanDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean_l2':
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
if distance_metric == "cosine":
distance = dst.findCosineDistance(
img1_representation, img2_representation
)
elif distance_metric == "euclidean":
distance = dst.findEuclideanDistance(
img1_representation, img2_representation
)
elif distance_metric == "euclidean_l2":
distance = dst.findEuclideanDistance(
dst.l2_normalize(img1_representation),
dst.l2_normalize(img2_representation),
)
else:
raise ValueError("Invalid distance_metric passed - ", distance_metric)
@ -115,11 +134,11 @@ def verify(img1_path, img2_path=''
# response object
resp_obj = "{"
resp_obj += "\"verified\": "+identified
resp_obj += ", \"distance\": "+str(distance)
resp_obj += ", \"max_threshold_to_verify\": "+str(threshold)
resp_obj += ", \"model\": \""+model_name+"\""
resp_obj += ", \"similarity_metric\": \""+distance_metric+"\""
resp_obj += '"verified": ' + identified
resp_obj += ', "distance": ' + str(distance)
resp_obj += ', "max_threshold_to_verify": ' + str(threshold)
resp_obj += ', "model": "' + model_name + '"'
resp_obj += ', "similarity_metric": "' + distance_metric + '"'
resp_obj += "}"
resp_obj = json.loads(resp_obj) # string to json
@ -149,7 +168,7 @@ def verify(img1_path, img2_path=''
if i > 0:
resp_obj += ", "
resp_obj += "\"pair_"+str(i+1)+"\": "+resp_item
resp_obj += '"pair_' + str(i + 1) + '": ' + resp_item
resp_obj += "}"
resp_obj = json.loads(resp_obj)
return resp_obj
@ -169,44 +188,44 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
# if a specific target is not passed, then find them all
if len(actions) == 0:
actions= ['emotion', 'age', 'gender', 'race']
actions = ["emotion", "age", "gender", "race"]
print("Actions to do: ", actions)
# ---------------------------------
if 'emotion' in actions:
if 'emotion' in models:
if "emotion" in actions:
if "emotion" in models:
print("already built emotion model is passed")
emotion_model = models['emotion']
emotion_model = models["emotion"]
else:
emotion_model = Emotion.loadModel()
if 'age' in actions:
if 'age' in models:
if "age" in actions:
if "age" in models:
print("already built age model is passed")
age_model = models['age']
age_model = models["age"]
else:
age_model = Age.loadModel()
if 'gender' in actions:
if 'gender' in models:
if "gender" in actions:
if "gender" in models:
print("already built gender model is passed")
gender_model = models['gender']
gender_model = models["gender"]
else:
gender_model = Gender.loadModel()
if 'race' in actions:
if 'race' in models:
if "race" in actions:
if "race" in models:
print("already built race model is passed")
race_model = models['race']
race_model = models["race"]
else:
race_model = Race.loadModel()
# ---------------------------------
resp_objects = []
global_pbar = tqdm(range(0,len(img_paths)), desc='Analyzing')
global_pbar = tqdm(range(0, len(img_paths)), desc="Analyzing")
# for img_path in img_paths:
for j in global_pbar:
@ -216,7 +235,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
# TO-DO: do this in parallel
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
pbar = tqdm(range(0, len(actions)), desc="Finding actions")
action_idx = 0
img_224 = None # Set to prevent re-detection
@ -228,41 +247,69 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
if action_idx > 0:
resp_obj += ", "
if action == 'emotion':
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
img = functions.detectFace(img_path, target_size = (48, 48), grayscale = True, enforce_detection = enforce_detection)
if action == "emotion":
emotion_labels = [
"angry",
"disgust",
"fear",
"happy",
"sad",
"surprise",
"neutral",
]
img = functions.detectFace(
img_path,
target_size=(48, 48),
grayscale=True,
enforce_detection=enforce_detection,
)
emotion_predictions = emotion_model.predict(img)[0, :]
sum_of_predictions = emotion_predictions.sum()
emotion_obj = "\"emotion\": {"
emotion_obj = '"emotion": {'
for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
emotion_prediction = (
100 * emotion_predictions[i] / sum_of_predictions
)
if i > 0: emotion_obj += ", "
if i > 0:
emotion_obj += ", "
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
emotion_obj += '"%s": %s' % (emotion_label, emotion_prediction)
emotion_obj += "}"
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
emotion_obj += ', "dominant_emotion": "%s"' % (
emotion_labels[np.argmax(emotion_predictions)]
)
resp_obj += emotion_obj
elif action == 'age':
elif action == "age":
if img_224 is None:
img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
img_224 = functions.detectFace(
img_path,
target_size=(224, 224),
grayscale=False,
enforce_detection=enforce_detection,
) # just emotion model expects grayscale images
# print("age prediction")
age_predictions = age_model.predict(img_224)[0, :]
apparent_age = Age.findApparentAge(age_predictions)
resp_obj += "\"age\": %s" % (apparent_age)
resp_obj += '"age": %s' % (apparent_age)
elif action == 'gender':
elif action == "gender":
if img_224 is None:
img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
img_224 = functions.detectFace(
img_path,
target_size=(224, 224),
grayscale=False,
enforce_detection=enforce_detection,
) # just emotion model expects grayscale images
# print("gender prediction")
gender_prediction = gender_model.predict(img_224)[0, :]
@ -272,27 +319,42 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
elif np.argmax(gender_prediction) == 1:
gender = "Man"
resp_obj += "\"gender\": \"%s\"" % (gender)
resp_obj += '"gender": "%s"' % (gender)
elif action == 'race':
elif action == "race":
if img_224 is None:
img_224 = functions.detectFace(img_path, target_size = (224, 224), grayscale = False, enforce_detection = enforce_detection) #just emotion model expects grayscale images
img_224 = functions.detectFace(
img_path,
target_size=(224, 224),
grayscale=False,
enforce_detection=enforce_detection,
) # just emotion model expects grayscale images
race_predictions = race_model.predict(img_224)[0, :]
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
race_labels = [
"asian",
"indian",
"black",
"white",
"middle eastern",
"latino hispanic",
]
sum_of_predictions = race_predictions.sum()
race_obj = "\"race\": {"
race_obj = '"race": {'
for i in range(0, len(race_labels)):
race_label = race_labels[i]
race_prediction = 100 * race_predictions[i] / sum_of_predictions
if i > 0: race_obj += ", "
if i > 0:
race_obj += ", "
race_obj += "\"%s\": %s" % (race_label, race_prediction)
race_obj += '"%s": %s' % (race_label, race_prediction)
race_obj += "}"
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
race_obj += ', "dominant_race": "%s"' % (
race_labels[np.argmax(race_predictions)]
)
resp_obj += race_obj
@ -316,7 +378,7 @@ def analyze(img_path, actions = [], models = {}, enforce_detection = True):
if i > 0:
resp_obj += ", "
resp_obj += "\"instance_"+str(i+1)+"\": "+resp_item
resp_obj += '"instance_' + str(i + 1) + '": ' + resp_item
resp_obj += "}"
resp_obj = json.loads(resp_obj)
return resp_obj
@ -328,14 +390,20 @@ def detectFace(img_path):
return img[:, :, ::-1] # bgr to rgb
def stream(
    db_path="",
    model_name="VGG-Face",
    distance_metric="cosine",
    enable_face_analysis=True,
):
    """Run real-time face recognition/analysis from the webcam.

    Thin wrapper that delegates to ``realtime.analysis``.

    Parameters
    ----------
    db_path : str, optional
        Folder of known face images to verify the stream against.
    model_name : str, optional
        Recognition backend (default ``"VGG-Face"``).
    distance_metric : str, optional
        Similarity metric (default ``"cosine"``).
    enable_face_analysis : bool, optional
        Also run emotion/age/gender/race analysis when True.
    """
    realtime.analysis(db_path, model_name, distance_metric, enable_face_analysis)
def allocateMemory():
    """Inspect the system and let the commons layer pick GPU/CPU memory setup."""
    print("Analyzing your system...")
    # Delegates the actual device/memory configuration to the commons module.
    functions.allocateMemory()
# NOTE(review): indentation was lost in this view — upstream deepface runs this
# at module import time to create ~/.deepface/weights; confirm it is top-level.
functions.initializeFolder()
# ---------------------------

File diff suppressed because it is too large Load Diff

View File

@ -3,44 +3,71 @@ from pathlib import Path
import gdown
import keras
from keras.models import Model, Sequential
from keras.layers import Convolution2D, LocallyConnected2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.layers import (
Convolution2D,
LocallyConnected2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
)
import zipfile
# -------------------------------------
def get_base_model():
    """Construct the raw FB DeepFace CNN, including the F8 classifier head.

    Returns
    -------
    keras.models.Sequential
        Untrained architecture expecting (152, 152, 3) inputs; weights are
        loaded separately by ``loadModel``.
    """
    base_model = Sequential()
    base_model.add(
        Convolution2D(
            32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3)
        )
    )
    base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
    base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
    base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
    base_model.add(
        LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5")
    )
    base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
    base_model.add(Flatten(name="F0"))
    base_model.add(Dense(4096, activation="relu", name="F7"))
    base_model.add(Dropout(rate=0.5, name="D0"))
    base_model.add(Dense(8631, activation="softmax", name="F8"))

    return base_model
def loadModel(model_path=""):
    """Load the FB DeepFace model, optionally from a custom weights path.

    Parameters
    ----------
    model_path : str, optional
        Path to a pre-downloaded ``.h5`` weights file. When empty, the
        default weights under ``~/.deepface/weights/`` are used and are
        downloaded first if missing.

    Returns
    -------
    keras.models.Model
        Model truncated at the F7 representation layer (F8 and D0 dropped).

    Raises
    ------
    FileNotFoundError
        If an explicit ``model_path`` does not exist.
    ValueError
        If an explicit ``model_path`` is not a ``.h5`` file.
    """
    if model_path:
        # Validate the caller-supplied path with real exceptions, not assert:
        # asserts are stripped under `python -O`, silently skipping the check.
        if not Path(model_path).exists():
            raise FileNotFoundError(model_path)
        if not model_path.endswith(".h5"):
            raise ValueError("model_path must point to a .h5 file: " + model_path)
    else:
        home = Path.home().as_posix()
        model_path = os.path.join(
            home, ".deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5"
        )

        if not os.path.isfile(model_path):
            print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")

            url = "https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"

            zip_path = os.path.join(
                home, ".deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
            )
            gdown.download(url, zip_path, quiet=False)

            # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                # BUG FIX: the previous os.path.join(home, "/.deepface/weights/")
                # passed an ABSOLUTE second component, which makes os.path.join
                # discard `home` and extract into /.deepface/weights/. Join with
                # a relative component so extraction lands under the home dir.
                zip_ref.extractall(os.path.join(home, ".deepface/weights/"))

    print(f"Loading model from {model_path}")
    base_model = get_base_model()
    base_model.load_weights(model_path)

    # drop F8 and D0. F7 is the representation layer.
    deepface_model = Model(
        inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output
    )

    return deepface_model

View File

@ -15,235 +15,394 @@ from keras import backend as K
# ---------------------------------------
def loadModel():
def get_base_model():
myInput = Input(shape=(96, 96, 3))
x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
x = Activation('relu')(x)
x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
x = Conv2D(64, (1, 1), name='conv2')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
x = Activation('relu')(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
x = Conv2D(64, (1, 1), name="conv2")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(192, (3, 3), name='conv3')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
x = Activation('relu')(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x) #x is equal added
x = Conv2D(192, (3, 3), name="conv3")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
x = Activation("relu")(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(
x
) # x is equal added
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
# Inception3a
inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
inception_3a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1"
)(inception_3a_3x3)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(
inception_3a_3x3
)
inception_3a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2"
)(inception_3a_3x3)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
inception_3a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1"
)(inception_3a_5x5)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(
inception_3a_5x5
)
inception_3a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2"
)(inception_3a_5x5)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
inception_3a_pool = Activation('relu')(inception_3a_pool)
inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(
inception_3a_pool
)
inception_3a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_pool_bn"
)(inception_3a_pool)
inception_3a_pool = Activation("relu")(inception_3a_pool)
inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
inception_3a_1x1 = Activation('relu')(inception_3a_1x1)
inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
inception_3a_1x1 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3a_1x1_bn"
)(inception_3a_1x1)
inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)
inception_3a = concatenate(
[inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1],
axis=3,
)
# Inception3b
inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
inception_3b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1"
)(inception_3b_3x3)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(
inception_3b_3x3
)
inception_3b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2"
)(inception_3b_3x3)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
inception_3b_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1"
)(inception_3b_5x5)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(
inception_3b_5x5
)
inception_3b_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2"
)(inception_3b_5x5)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x*9, name='mult9_3b')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_3b')(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
inception_3b_pool = Activation('relu')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x ** 2, name="power2_3b")(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
inception_3b_pool
)
inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(
inception_3b_pool
)
inception_3b_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_pool_bn"
)(inception_3b_pool)
inception_3b_pool = Activation("relu")(inception_3b_pool)
inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
inception_3b_1x1 = Activation('relu')(inception_3b_1x1)
inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
inception_3b_1x1 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3b_1x1_bn"
)(inception_3b_1x1)
inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)
inception_3b = concatenate(
[inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1],
axis=3,
)
# Inception3c
inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name='inception_3c_3x3_conv1')(inception_3b)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3c_3x3 = Conv2D(
128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1"
)(inception_3b)
inception_3c_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1"
)(inception_3c_3x3)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name='inception_3c_3x3_conv'+'2')(inception_3c_3x3)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn'+'2')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3c_3x3 = Conv2D(
256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2"
)(inception_3c_3x3)
inception_3c_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
)(inception_3c_3x3)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name='inception_3c_5x5_conv1')(inception_3b)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3c_5x5 = Conv2D(
32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1"
)(inception_3b)
inception_3c_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1"
)(inception_3c_5x5)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name='inception_3c_5x5_conv'+'2')(inception_3c_5x5)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn'+'2')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3c_5x5 = Conv2D(
64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2"
)(inception_3c_5x5)
inception_3c_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
)(inception_3c_5x5)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
inception_3c = concatenate(
[inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3
)
# inception 4a
inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name='inception_4a_3x3_conv'+'1')(inception_3c)
inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'1')(inception_4a_3x3)
inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
inception_4a_3x3 = Conv2D(
96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1"
)(inception_3c)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name='inception_4a_3x3_conv'+'2')(inception_4a_3x3)
inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'2')(inception_4a_3x3)
inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
inception_4a_3x3 = Conv2D(
192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2"
)(inception_4a_3x3)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_5x5 = Conv2D(32, (1,1), strides=(1,1), name='inception_4a_5x5_conv1')(inception_3c)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
inception_4a_5x5 = Conv2D(
32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1"
)(inception_3c)
inception_4a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1"
)(inception_4a_5x5)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
inception_4a_5x5 = Conv2D(64, (5,5), strides=(1,1), name='inception_4a_5x5_conv'+'2')(inception_4a_5x5)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn'+'2')(inception_4a_5x5)
inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
inception_4a_5x5 = Conv2D(
64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2"
)(inception_4a_5x5)
inception_4a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
)(inception_4a_5x5)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: x*9, name='mult9_4a')(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_4a')(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: x ** 2, name="power2_4a")(inception_3c)
inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
inception_4a_pool
)
inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
inception_4a_pool = Conv2D(128, (1,1), strides=(1,1), name='inception_4a_pool_conv'+'')(inception_4a_pool)
inception_4a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_pool_bn'+'')(inception_4a_pool)
inception_4a_pool = Activation('relu')(inception_4a_pool)
inception_4a_pool = Conv2D(
128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + ""
)(inception_4a_pool)
inception_4a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
)(inception_4a_pool)
inception_4a_pool = Activation("relu")(inception_4a_pool)
inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name='inception_4a_1x1_conv'+'')(inception_3c)
inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_1x1_bn'+'')(inception_4a_1x1)
inception_4a_1x1 = Activation('relu')(inception_4a_1x1)
inception_4a_1x1 = Conv2D(
256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + ""
)(inception_3c)
inception_4a_1x1 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + ""
)(inception_4a_1x1)
inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
inception_4a = concatenate([inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3)
inception_4a = concatenate(
[inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1],
axis=3,
)
# inception4e
inception_4e_3x3 = Conv2D(160, (1,1), strides=(1,1), name='inception_4e_3x3_conv'+'1')(inception_4a)
inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'1')(inception_4e_3x3)
inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
inception_4e_3x3 = Conv2D(
160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1"
)(inception_4a)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
inception_4e_3x3 = Conv2D(256, (3,3), strides=(2,2), name='inception_4e_3x3_conv'+'2')(inception_4e_3x3)
inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'2')(inception_4e_3x3)
inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
inception_4e_3x3 = Conv2D(
256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2"
)(inception_4e_3x3)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_5x5 = Conv2D(64, (1,1), strides=(1,1), name='inception_4e_5x5_conv'+'1')(inception_4a)
inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'1')(inception_4e_5x5)
inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
inception_4e_5x5 = Conv2D(
64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1"
)(inception_4a)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
inception_4e_5x5 = Conv2D(128, (5,5), strides=(2,2), name='inception_4e_5x5_conv'+'2')(inception_4e_5x5)
inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'2')(inception_4e_5x5)
inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
inception_4e_5x5 = Conv2D(
128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2"
)(inception_4e_5x5)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
inception_4e = concatenate(
[inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3
)
# inception5a
inception_5a_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_3x3_conv'+'1')(inception_4e)
inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'1')(inception_5a_3x3)
inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
inception_5a_3x3 = Conv2D(
96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1"
)(inception_4e)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
inception_5a_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5a_3x3_conv'+'2')(inception_5a_3x3)
inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'2')(inception_5a_3x3)
inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
inception_5a_3x3 = Conv2D(
384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2"
)(inception_5a_3x3)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: x*9, name='mult9_5a')(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_5a')(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: x ** 2, name="power2_5a")(inception_4e)
inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(
inception_5a_pool
)
inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
inception_5a_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_pool_conv'+'')(inception_5a_pool)
inception_5a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_pool_bn'+'')(inception_5a_pool)
inception_5a_pool = Activation('relu')(inception_5a_pool)
inception_5a_pool = Conv2D(
96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + ""
)(inception_5a_pool)
inception_5a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
)(inception_5a_pool)
inception_5a_pool = Activation("relu")(inception_5a_pool)
inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
inception_5a_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5a_1x1_conv'+'')(inception_4e)
inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_1x1_bn'+'')(inception_5a_1x1)
inception_5a_1x1 = Activation('relu')(inception_5a_1x1)
inception_5a_1x1 = Conv2D(
256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + ""
)(inception_4e)
inception_5a_1x1 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + ""
)(inception_5a_1x1)
inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
inception_5a = concatenate(
[inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3
)
# inception_5b
inception_5b_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_3x3_conv'+'1')(inception_5a)
inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'1')(inception_5b_3x3)
inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
inception_5b_3x3 = Conv2D(
96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1"
)(inception_5a)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
inception_5b_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5b_3x3_conv'+'2')(inception_5b_3x3)
inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'2')(inception_5b_3x3)
inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
inception_5b_3x3 = Conv2D(
384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2"
)(inception_5b_3x3)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
inception_5b_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_pool_conv'+'')(inception_5b_pool)
inception_5b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_pool_bn'+'')(inception_5b_pool)
inception_5b_pool = Activation('relu')(inception_5b_pool)
inception_5b_pool = Conv2D(
96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + ""
)(inception_5b_pool)
inception_5b_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
)(inception_5b_pool)
inception_5b_pool = Activation("relu")(inception_5b_pool)
inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
inception_5b_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5b_1x1_conv'+'')(inception_5a)
inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_1x1_bn'+'')(inception_5b_1x1)
inception_5b_1x1 = Activation('relu')(inception_5b_1x1)
inception_5b_1x1 = Conv2D(
256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + ""
)(inception_5a)
inception_5b_1x1 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + ""
)(inception_5b_1x1)
inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
inception_5b = concatenate(
[inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3
)
av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
reshape_layer = Flatten()(av_pool)
dense_layer = Dense(128, name='dense_layer')(reshape_layer)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)
dense_layer = Dense(128, name="dense_layer")(reshape_layer)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(
dense_layer
)
# Final Model
model = Model(inputs=[myInput], outputs=norm_layer)
return Model(inputs=[myInput], outputs=norm_layer)
def loadModel(model_path=""):
# -----------------------------------
if model_path:
assert Path(model_path).exists()
assert model_path.endswith(".h5")
else:
home = Path.home().as_posix()
model_path = home + "/.deepface/weights/openface_weights.h5"
if not os.path.isfile(model_path):
print(f"openface_weights.h5 will be downloaded to {model_path}")
url = "https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn"
gdown.download(url, model_path, quiet=False)
# -----------------------------------
home = str(Path.home())
if os.path.isfile(home+'/.deepface/weights/openface_weights.h5') != True:
print("openface_weights.h5 will be downloaded...")
url = 'https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn'
output = home+'/.deepface/weights/openface_weights.h5'
gdown.download(url, output, quiet=False)
#-----------------------------------
model.load_weights(home+'/.deepface/weights/openface_weights.h5')
print(f"Loading model from {model_path}")
model = get_base_model()
model.load_weights(model_path)
# -----------------------------------

View File

@ -1,81 +1,99 @@
import os
from pathlib import Path
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from keras.layers import (
Input,
Convolution2D,
ZeroPadding2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
Activation,
)
import gdown
# ---------------------------------------
def get_base_model():
    """Build the VGG-Face convolutional architecture (no weights loaded).

    Returns:
        keras.models.Sequential: the 224x224x3-input VGG-Face network ending
        in a 2622-way softmax (the original face-identity classifier head).
    """
    # NOTE: the surrounding view duplicated old/new diff lines; this is the
    # deduplicated post-commit definition.
    model = Sequential()
    # Block 1: two 64-filter conv layers
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 2: two 128-filter conv layers
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 3: three 256-filter conv layers
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 4: three 512-filter conv layers
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 5: three 512-filter conv layers
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Fully-connected layers expressed as convolutions, then softmax head
    model.add(Convolution2D(4096, (7, 7), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation("softmax"))

    return model
def loadModel(model_path=""):
    """Load the VGG-Face descriptor model.

    Args:
        model_path: str
            If provided, weights are loaded from this ``.h5`` file.
            Otherwise the pre-trained weights are looked up under
            ``~/.deepface/weights`` and downloaded there if missing.

    Returns:
        keras.models.Model mapping the 224x224x3 input to the penultimate
        (descriptor) layer output.

    Raises:
        ValueError: if an explicit ``model_path`` does not exist or is not
            an ``.h5`` file.
    """
    if model_path:
        # Validate with explicit exceptions rather than `assert`, which is
        # silently stripped when Python runs with -O.
        if not Path(model_path).exists():
            raise ValueError(f"model_path does not exist: {model_path}")
        if not model_path.endswith(".h5"):
            raise ValueError(f"model_path must be an .h5 file: {model_path}")
    else:
        home = Path.home().as_posix()
        model_path = os.path.join(home, ".deepface", "weights", "vgg_face_weights.h5")
        if not os.path.isfile(model_path):
            # Ensure the weights directory exists before downloading.
            os.makedirs(os.path.dirname(model_path), exist_ok=True)
            print(f"vgg_face_weights.h5 will be downloaded to {model_path}")
            url = "https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo"
            gdown.download(url, model_path, quiet=False)

    print(f"Loading model from {model_path}")
    model = get_base_model()
    model.load_weights(model_path)

    # TO-DO: why?
    # Expose the penultimate layer's activations as the face descriptor.
    vgg_face_descriptor = Model(
        inputs=model.layers[0].input, outputs=model.layers[-2].output
    )

    return vgg_face_descriptor

View File

@ -17,23 +17,30 @@ import subprocess
import tensorflow as tf
import keras
def loadBase64Img(uri):
    """Decode a data-URI (``data:image/...;base64,<payload>``) into a BGR image.

    Args:
        uri: str, a base64 data URI; only the part after the first comma
            (the payload) is decoded.

    Returns:
        numpy.ndarray decoded by OpenCV in BGR color order.
    """
    encoded_data = uri.split(",")[1]
    # np.fromstring is deprecated for binary data; frombuffer is the
    # supported zero-copy equivalent.
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img
def distance(a, b):
    """Return the Euclidean distance between two 2D points.

    Args:
        a: indexable pair (x, y).
        b: indexable pair (x, y).

    Returns:
        float: sqrt((bx - ax)^2 + (by - ay)^2).
    """
    x1, y1 = a[0], a[1]
    x2, y2 = b[0], b[1]
    return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
def findFileHash(file):
BLOCK_SIZE = 65536 # The size of each read from the file
file_hash = hashlib.sha256() # Create the hash object, can use something other than `.sha256()` if you wish
with open(file, 'rb') as f: # Open the file to read it's bytes
file_hash = (
hashlib.sha256()
) # Create the hash object, can use something other than `.sha256()` if you wish
with open(file, "rb") as f: # Open the file to read it's bytes
fb = f.read(BLOCK_SIZE) # Read from the file. Take in the amount declared above
while len(fb) > 0: # While there is still data being read from the file
file_hash.update(fb) # Update the hash
@ -41,6 +48,7 @@ def findFileHash(file):
return file_hash.hexdigest()
def initializeFolder():
home = str(Path.home())
@ -81,44 +89,46 @@ def initializeFolder():
"""
# ----------------------------------
def findThreshold(model_name, distance_metric):
    """Return the verification decision threshold for a model/metric pair.

    Args:
        model_name: one of "VGG-Face", "OpenFace", "Facenet", "DeepFace".
        distance_metric: one of "cosine", "euclidean", "euclidean_l2".

    Returns:
        The tuned threshold; pairs below it are considered the same person.
        Unknown model/metric combinations fall back to 0.40 (the original
        default).
    """
    # Table lookup replaces the equivalent if/elif ladder; values unchanged.
    thresholds = {
        "VGG-Face": {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75},
        "OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55},
        "Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80},
        "DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64},
    }
    return thresholds.get(model_name, {}).get(distance_metric, 0.40)
def get_opencv_path():
opencv_home = cv2.__file__
folders = opencv_home.split(os.path.sep)[0:-1]
@ -131,10 +141,15 @@ def get_opencv_path():
eye_detector_path = path + "/data/haarcascade_eye.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
face_detector_path,
" violated.",
)
return path + "/data/"
def detectFace(img, target_size=(224, 224), grayscale=False, enforce_detection=True):
img_path = ""
@ -156,7 +171,11 @@ def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection
eye_detector_path = opencv_path + "haarcascade_eye.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
face_detector_path,
" violated.",
)
# --------------------------------
@ -200,14 +219,17 @@ def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection
item = (base_eyes[i], i)
items.append(item)
df = pd.DataFrame(items, columns = ["length", "idx"]).sort_values(by=['length'], ascending=False)
df = pd.DataFrame(items, columns=["length", "idx"]).sort_values(
by=["length"], ascending=False
)
eyes = eyes[df.idx.values[0:2]]
# -----------------------
# decide left and right eye
eye_1 = eyes[0]; eye_2 = eyes[1]
eye_1 = eyes[0]
eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1
@ -219,11 +241,19 @@ def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection
# -----------------------
# find center of eyes
left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]
left_eye_center = (
int(left_eye[0] + (left_eye[2] / 2)),
int(left_eye[1] + (left_eye[3] / 2)),
)
left_eye_x = left_eye_center[0]
left_eye_y = left_eye_center[1]
right_eye_center = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]
right_eye_center = (
int(right_eye[0] + (right_eye[2] / 2)),
int(right_eye[1] + (right_eye[3] / 2)),
)
right_eye_x = right_eye_center[0]
right_eye_y = right_eye_center[1]
# -----------------------
# find rotation direction
@ -296,17 +326,24 @@ def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection
img_pixels /= 255
return img_pixels
else:
raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")
raise ValueError(
"Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False."
)
def allocateMemory():
# find allocated memories
gpu_indexes = []
memory_usage_percentages = []; available_memories = []; total_memories = []; utilizations = []
power_usages = []; power_capacities = []
memory_usage_percentages = []
available_memories = []
total_memories = []
utilizations = []
power_usages = []
power_capacities = []
try:
result = subprocess.check_output(['nvidia-smi'])
result = subprocess.check_output(["nvidia-smi"])
dashboard = result.decode("utf-8").split("=|")
@ -314,10 +351,12 @@ def allocateMemory():
gpu_idx = 0
for line in dashboard:
if ("MiB" in line):
if "MiB" in line:
power_info = line.split("|")[1]
power_capacity = int(power_info.split("/")[-1].replace("W", ""))
power_usage = int((power_info.split("/")[-2]).strip().split(" ")[-1].replace("W", ""))
power_usage = int(
(power_info.split("/")[-2]).strip().split(" ")[-1].replace("W", "")
)
power_usages.append(power_usage)
power_capacities.append(power_capacity)
@ -333,7 +372,9 @@ def allocateMemory():
total_memories.append(total_memory)
available_memories.append(available_memory)
memory_usage_percentages.append(round(100*int(allocated)/int(total_memory), 4))
memory_usage_percentages.append(
round(100 * int(allocated) / int(total_memory), 4)
)
utilizations.append(utilization_info)
gpu_indexes.append(gpu_idx)
@ -355,7 +396,9 @@ def allocateMemory():
df["power_usages_in_watts"] = power_usages
df["power_capacities_in_watts"] = power_capacities
df = df.sort_values(by = ["available_memories_in_mb"], ascending = False).reset_index(drop = True)
df = df.sort_values(by=["available_memories_in_mb"], ascending=False).reset_index(
drop=True
)
# ------------------------------
@ -379,7 +422,9 @@ def allocateMemory():
else:
# this case has gpu but no enough memory to allocate
os.environ["CUDA_VISIBLE_DEVICES"] = "" # run it on cpu
print("Even though the system has GPUs, there is no enough space in memory to allocate.")
print(
"Even though the system has GPUs, there is no enough space in memory to allocate."
)
print("DeepFace will run on CPU")
else:
print("DeepFace will run on CPU")