Sefik Ilkin Serengil 2023-01-29 00:45:25 +00:00
parent 24cc717620
commit 7a978d29c5
33 changed files with 5587 additions and 3369 deletions

.github/workflows/lint.yml (vendored, new file, +45)

@@ -0,0 +1,45 @@
name: Tests
on:
  push:
    paths:
      - '.github/workflows/lint.yml'
      - 'deepface/**'
      - 'tests/**'
      - 'api/**'
      - 'requirements.txt'
      - '.gitignore'
      - 'setup.py'
  pull_request:
    paths:
      - '.github/workflows/lint.yml'
      - 'deepface/**'
      - 'tests/**'
      - 'api/**'
      - 'requirements.txt'
      - '.gitignore'
      - 'setup.py'
jobs:
  linting-tests-ubuntu-latest:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.8]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pylint
      - name: Lint tests with pylint
        run: |
          pylint --fail-under=10 deepface/

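The workflow above fails the build whenever the pylint score drops below 10. A minimal sketch of running the same gate locally through pylint's programmatic API, assuming pylint 2.x is installed (this mirrors the `pylint --fail-under=10 deepface/` call in the last step):

# a sketch of running the workflow's lint gate locally
from pylint.lint import Run

run = Run(["deepface/", "--fail-under=10"], exit=False)
# msg_status is a bit field of the message categories that fired;
# the CLI exits non-zero when the score falls below --fail-under
print("pylint message status:", run.linter.msg_status)
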
.gitignore (vendored, 1 change)

@@ -7,7 +7,6 @@ dist/
Pipfile
Pipfile.lock
.mypy_cache/
- .vscode/
.idea/
deepface.egg-info/
deepface/__pycache__/*

.pylintrc (new file, +635)

@@ -0,0 +1,635 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Load and enable all available extensions. Use --list-extensions to see a list
# of all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\' represents the directory delimiter on Windows systems, it
# can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.9
# Discover python modules and packages in the file system subtree.
recursive=no
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
           j,
           k,
           ex,
           Run,
           _
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp,
                      __post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
                  _fields,
                  _replace,
                  _source,
                  _make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=BaseException,
                       Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
           CONTROL_FLOW,
           INFERENCE,
           INFERENCE_FAILURE,
           UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
        bad-inline-option,
        locally-disabled,
        file-ignored,
        suppressed-message,
        useless-suppression,
        deprecated-pragma,
        use-symbolic-message-instead,
        import-error,
        invalid-name,
        missing-module-docstring,
        missing-function-docstring,
        missing-class-docstring,
        too-many-arguments,
        too-many-locals,
        too-many-branches,
        too-many-statements,
        global-variable-undefined,
        import-outside-toplevel,
        singleton-comparison,
        too-many-lines,
        duplicate-code,
        bare-except,
        cyclic-import,
        global-statement
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum number of lines for a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the 'python-enchant' package.
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
                          not-async-context-manager,
                          not-context-manager,
                          attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io

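For reference, the `evaluation` expression in [REPORTS] above maps message counts to the 0-10 score that `fail-under` checks. A worked example with made-up counts:

# a worked example of the evaluation formula (counts are illustrative)
fatal, error, warning, refactor, convention, statement = 0, 1, 2, 0, 1, 200
score = max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
print(score)  # 9.6 -> below fail-under=10, so this run would fail the gate
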
.vscode/settings.json (vendored, new file, +17)

@@ -0,0 +1,17 @@
{
    "python.linting.pylintEnabled": true,
    "python.linting.enabled": true,
    "python.linting.pylintUseMinimalCheckers": false,
    "editor.formatOnSave": true,
    "editor.renderWhitespace": "all",
    "files.autoSave": "afterDelay",
    "python.analysis.typeCheckingMode": "basic",
    "python.formatting.provider": "black",
    "python.formatting.blackArgs": [
        "--line-length=100"
    ],
    "editor.fontWeight": "normal",
    "python.analysis.extraPaths": [
        "./deepface"
    ]
}

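The editor settings above format on save with black at the same 100-character limit that .pylintrc enforces. A sketch of applying the identical formatting from the command line, assuming black is installed:

# a sketch of running the same formatter outside the editor
import subprocess

subprocess.run(["black", "--line-length=100", "deepface/"], check=True)
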
File diff suppressed because it is too large.

deepface/basemodels/ArcFace.py

@@ -1,93 +1,156 @@
- from tensorflow.python.keras import backend
- from tensorflow.python.keras.engine import training
- from tensorflow.python.keras.utils import data_utils
- from tensorflow.python.keras.utils import layer_utils
- from tensorflow.python.lib.io import file_io
- import tensorflow
- from tensorflow import keras
import os
- from pathlib import Path
import gdown
+ import tensorflow as tf
from deepface.commons import functions
- #url = "https://drive.google.com/uc?id=1LVB3CdVejpmGHM28BpqqkbZP5hDEcdZY"
+ # --------------------------------
+ # dependency configuration
- def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5'):
-     base_model = ResNet34()
-     inputs = base_model.inputs[0]
-     arcface_model = base_model.outputs[0]
-     arcface_model = keras.layers.BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
-     arcface_model = keras.layers.Dropout(0.4)(arcface_model)
-     arcface_model = keras.layers.Flatten()(arcface_model)
-     arcface_model = keras.layers.Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(arcface_model)
-     embedding = keras.layers.BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(arcface_model)
-     model = keras.models.Model(inputs, embedding, name=base_model.name)
+ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
-     #---------------------------------------
-     #check the availability of pre-trained weights
+ if tf_version == 1:
+     from keras.engine import training
+     import keras
+     from keras.layers import (
+         ZeroPadding2D,
+         Input,
+         Conv2D,
+         BatchNormalization,
+         PReLU,
+         Add,
+         Dropout,
+         Flatten,
+         Dense,
+     )
+ else:
+     from tensorflow.python.keras.engine import training
+     from tensorflow import keras
+     from tensorflow.keras.layers import (
+         ZeroPadding2D,
+         Input,
+         Conv2D,
+         BatchNormalization,
+         PReLU,
+         Add,
+         Dropout,
+         Flatten,
+         Dense,
+     )
+ # --------------------------------
-     home = functions.get_deepface_home()
-     file_name = "arcface_weights.h5"
-     output = home+'/.deepface/weights/'+file_name
+ # url = "https://drive.google.com/uc?id=1LVB3CdVejpmGHM28BpqqkbZP5hDEcdZY"
-     if os.path.isfile(output) != True:
-         print(file_name," will be downloaded to ",output)
-         gdown.download(url, output, quiet=False)
+ def loadModel(
+     url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
+ ):
+     base_model = ResNet34()
+     inputs = base_model.inputs[0]
+     arcface_model = base_model.outputs[0]
+     arcface_model = BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
+     arcface_model = Dropout(0.4)(arcface_model)
+     arcface_model = Flatten()(arcface_model)
+     arcface_model = Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(
+         arcface_model
+     )
+     embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
+         arcface_model
+     )
+     model = keras.models.Model(inputs, embedding, name=base_model.name)
-     #---------------------------------------
+     # ---------------------------------------
+     # check the availability of pre-trained weights
-     model.load_weights(output)
+     home = functions.get_deepface_home()
+     file_name = "arcface_weights.h5"
+     output = home + "/.deepface/weights/" + file_name
+     if os.path.isfile(output) != True:
+         print(file_name, " will be downloaded to ", output)
+         gdown.download(url, output, quiet=False)
+     # ---------------------------------------
+     model.load_weights(output)
-     return model
+     return model
def ResNet34():
-     img_input = tensorflow.keras.layers.Input(shape=(112, 112, 3))
+     img_input = Input(shape=(112, 112, 3))
-     x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name='conv1_pad')(img_input)
-     x = tensorflow.keras.layers.Conv2D(64, 3, strides=1, use_bias=False, kernel_initializer='glorot_normal', name='conv1_conv')(x)
-     x = tensorflow.keras.layers.BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name='conv1_bn')(x)
-     x = tensorflow.keras.layers.PReLU(shared_axes=[1, 2], name='conv1_prelu')(x)
-     x = stack_fn(x)
+     x = ZeroPadding2D(padding=1, name="conv1_pad")(img_input)
+     x = Conv2D(
+         64, 3, strides=1, use_bias=False, kernel_initializer="glorot_normal", name="conv1_conv"
+     )(x)
+     x = BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name="conv1_bn")(x)
+     x = PReLU(shared_axes=[1, 2], name="conv1_prelu")(x)
+     x = stack_fn(x)
-     model = training.Model(img_input, x, name='ResNet34')
+     model = training.Model(img_input, x, name="ResNet34")
-     return model
+     return model
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
-     bn_axis = 3
+     bn_axis = 3
-     if conv_shortcut:
-         shortcut = tensorflow.keras.layers.Conv2D(filters, 1, strides=stride, use_bias=False, kernel_initializer='glorot_normal', name=name + '_0_conv')(x)
-         shortcut = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_0_bn')(shortcut)
-     else:
-         shortcut = x
+     if conv_shortcut:
+         shortcut = Conv2D(
+             filters,
+             1,
+             strides=stride,
+             use_bias=False,
+             kernel_initializer="glorot_normal",
+             name=name + "_0_conv",
+         )(x)
+         shortcut = BatchNormalization(
+             axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_0_bn"
+         )(shortcut)
+     else:
+         shortcut = x
-     x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_1_bn')(x)
-     x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name=name + '_1_pad')(x)
-     x = tensorflow.keras.layers.Conv2D(filters, 3, strides=1, kernel_initializer='glorot_normal', use_bias=False, name=name + '_1_conv')(x)
-     x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_2_bn')(x)
-     x = tensorflow.keras.layers.PReLU(shared_axes=[1, 2], name=name + '_1_prelu')(x)
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_1_bn")(x)
+     x = ZeroPadding2D(padding=1, name=name + "_1_pad")(x)
+     x = Conv2D(
+         filters,
+         3,
+         strides=1,
+         kernel_initializer="glorot_normal",
+         use_bias=False,
+         name=name + "_1_conv",
+     )(x)
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_2_bn")(x)
+     x = PReLU(shared_axes=[1, 2], name=name + "_1_prelu")(x)
-     x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name=name + '_2_pad')(x)
-     x = tensorflow.keras.layers.Conv2D(filters, kernel_size, strides=stride, kernel_initializer='glorot_normal', use_bias=False, name=name + '_2_conv')(x)
-     x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_3_bn')(x)
+     x = ZeroPadding2D(padding=1, name=name + "_2_pad")(x)
+     x = Conv2D(
+         filters,
+         kernel_size,
+         strides=stride,
+         kernel_initializer="glorot_normal",
+         use_bias=False,
+         name=name + "_2_conv",
+     )(x)
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_3_bn")(x)
-     x = tensorflow.keras.layers.Add(name=name + '_add')([shortcut, x])
-     return x
+     x = Add(name=name + "_add")([shortcut, x])
+     return x
def stack1(x, filters, blocks, stride1=2, name=None):
-     x = block1(x, filters, stride=stride1, name=name + '_block1')
-     for i in range(2, blocks + 1):
-         x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
-     return x
+     x = block1(x, filters, stride=stride1, name=name + "_block1")
+     for i in range(2, blocks + 1):
+         x = block1(x, filters, conv_shortcut=False, name=name + "_block" + str(i))
+     return x
def stack_fn(x):
-     x = stack1(x, 64, 3, name='conv2')
-     x = stack1(x, 128, 4, name='conv3')
-     x = stack1(x, 256, 6, name='conv4')
-     return stack1(x, 512, 3, name='conv5')
+     x = stack1(x, 64, 3, name="conv2")
+     x = stack1(x, 128, 4, name="conv3")
+     x = stack1(x, 256, 6, name="conv4")
+     return stack1(x, 512, 3, name="conv5")

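A minimal usage sketch for the reworked module; the random array is a placeholder for a detected, aligned face crop, which in practice comes from the detection pipeline:

# hypothetical usage: ArcFace maps a 112x112 RGB crop to a 512-d embedding
import numpy as np
from deepface.basemodels import ArcFace

model = ArcFace.loadModel()  # downloads arcface_weights.h5 on first call
face = np.random.rand(1, 112, 112, 3).astype("float32")  # placeholder input
embedding = model.predict(face)  # shape: (1, 512), from the "embedding" layer
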
deepface/basemodels/Boosting.py (deleted)

@@ -1,59 +0,0 @@
- from deepface import DeepFace
- from tqdm import tqdm
- import os
- from os import path
- from pathlib import Path
- import numpy as np
- import gdown
- from deepface.commons import functions, distance as dst
- def loadModel():
-     model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
-     model = {}
-     model_pbar = tqdm(range(0, 4), desc='Face recognition models')
-     for index in model_pbar:
-         model_name = model_names[index]
-         model_pbar.set_description("Loading %s" % (model_name))
-         model[model_name] = DeepFace.build_model(model_name)
-     return model
- def validate_model(model):
-     #validate model dictionary because it might be passed from input as pre-trained
-     found_models = []
-     for key, value in model.items():
-         found_models.append(key)
-     if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models):
-         #print("Ensemble learning will be applied for ", found_models," models")
-         valid = True
-     else:
-         missing_ones = set(['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']) - set(found_models)
-         raise ValueError("You'd like to apply ensemble method and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+str(found_models)+". So, you need to pass "+str(missing_ones)+" models as well.")
- def build_gbm():
-     #this is not a must dependency
-     import lightgbm as lgb #lightgbm==2.3.1
-     home = functions.get_deepface_home()
-     if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True:
-         print("face-recognition-ensemble-model.txt will be downloaded...")
-         url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
-         output = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
-         gdown.download(url, output, quiet=False)
-     ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
-     deepface_ensemble = lgb.Booster(model_file = ensemble_model_path)
-     return deepface_ensemble

deepface/basemodels/DeepID.py

@@ -1,56 +1,81 @@
import os
- from pathlib import Path
import gdown
- import zipfile
- from tensorflow import keras
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import Conv2D, Activation, Input, Add, MaxPooling2D, Flatten, Dense, Dropout
+ import tensorflow as tf
from deepface.commons import functions
- #-------------------------------------
+ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
- #url = 'https://drive.google.com/uc?id=1uRLtBCTQQAvHJ_KVrdbRJiCKxU8m5q2J'
+ if tf_version == 1:
+     from keras.models import Model
+     from keras.layers import (
+         Conv2D,
+         Activation,
+         Input,
+         Add,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+ else:
+     from tensorflow.keras.models import Model
+     from tensorflow.keras.layers import (
+         Conv2D,
+         Activation,
+         Input,
+         Add,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+ # -------------------------------------
+ # pylint: disable=line-too-long
- def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5'):
-     myInput = Input(shape=(55, 47, 3))
-     x = Conv2D(20, (4, 4), name='Conv1', activation='relu', input_shape=(55, 47, 3))(myInput)
-     x = MaxPooling2D(pool_size=2, strides=2, name='Pool1')(x)
-     x = Dropout(rate=0.99, name='D1')(x)
-     x = Conv2D(40, (3, 3), name='Conv2', activation='relu')(x)
-     x = MaxPooling2D(pool_size=2, strides=2, name='Pool2')(x)
-     x = Dropout(rate=0.99, name='D2')(x)
-     x = Conv2D(60, (3, 3), name='Conv3', activation='relu')(x)
-     x = MaxPooling2D(pool_size=2, strides=2, name='Pool3')(x)
-     x = Dropout(rate=0.99, name='D3')(x)
+ def loadModel(
+     url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
+ ):
+     myInput = Input(shape=(55, 47, 3))
+     x = Conv2D(20, (4, 4), name="Conv1", activation="relu", input_shape=(55, 47, 3))(myInput)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool1")(x)
+     x = Dropout(rate=0.99, name="D1")(x)
+     x = Conv2D(40, (3, 3), name="Conv2", activation="relu")(x)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool2")(x)
+     x = Dropout(rate=0.99, name="D2")(x)
+     x = Conv2D(60, (3, 3), name="Conv3", activation="relu")(x)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool3")(x)
+     x = Dropout(rate=0.99, name="D3")(x)
-     x1 = Flatten()(x)
-     fc11 = Dense(160, name = 'fc11')(x1)
-     x2 = Conv2D(80, (2, 2), name='Conv4', activation='relu')(x)
-     x2 = Flatten()(x2)
-     fc12 = Dense(160, name = 'fc12')(x2)
+     x1 = Flatten()(x)
+     fc11 = Dense(160, name="fc11")(x1)
+     x2 = Conv2D(80, (2, 2), name="Conv4", activation="relu")(x)
+     x2 = Flatten()(x2)
+     fc12 = Dense(160, name="fc12")(x2)
-     y = Add()([fc11, fc12])
-     y = Activation('relu', name = 'deepid')(y)
+     y = Add()([fc11, fc12])
+     y = Activation("relu", name="deepid")(y)
-     model = Model(inputs=[myInput], outputs=y)
+     model = Model(inputs=[myInput], outputs=y)
-     #---------------------------------
+     # ---------------------------------
-     home = functions.get_deepface_home()
+     home = functions.get_deepface_home()
-     if os.path.isfile(home+'/.deepface/weights/deepid_keras_weights.h5') != True:
-         print("deepid_keras_weights.h5 will be downloaded...")
-         output = home+'/.deepface/weights/deepid_keras_weights.h5'
-         gdown.download(url, output, quiet=False)
+     if os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5") != True:
+         print("deepid_keras_weights.h5 will be downloaded...")
+         output = home + "/.deepface/weights/deepid_keras_weights.h5"
+         gdown.download(url, output, quiet=False)
-     model.load_weights(home+'/.deepface/weights/deepid_keras_weights.h5')
+     model.load_weights(home + "/.deepface/weights/deepid_keras_weights.h5")
-     return model
+     return model

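As with ArcFace, a hypothetical usage sketch (the random array is a stand-in for a real aligned crop):

# hypothetical usage: DeepID maps a 55x47 RGB crop to a 160-d vector
import numpy as np
from deepface.basemodels import DeepID

model = DeepID.loadModel()  # downloads deepid_keras_weights.h5 on first call
face = np.random.rand(1, 55, 47, 3).astype("float32")  # placeholder input
vector = model.predict(face)  # shape: (1, 160), the "deepid" activation output
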
deepface/basemodels/DlibResNet.py

@@ -1,74 +1,77 @@
import os
- import zipfile
import bz2
import gdown
import numpy as np
- from pathlib import Path
from deepface.commons import functions
+ # pylint: disable=too-few-public-methods
class DlibResNet:
-     def __init__(self):
+     def __init__(self):
-         #this is not a must dependency
-         import dlib #19.20.0
+         # this is not a must dependency
+         import dlib  # 19.20.0
-         self.layers = [DlibMetaData()]
+         self.layers = [DlibMetaData()]
-         #---------------------
+         # ---------------------
-         home = functions.get_deepface_home()
-         weight_file = home+'/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat'
+         home = functions.get_deepface_home()
+         weight_file = home + "/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat"
-         #---------------------
+         # ---------------------
-         #download pre-trained model if it does not exist
-         if os.path.isfile(weight_file) != True:
-             print("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
+         # download pre-trained model if it does not exist
+         if os.path.isfile(weight_file) != True:
+             print("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
-             url = "http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2"
-             output = home+'/.deepface/weights/'+url.split("/")[-1]
-             gdown.download(url, output, quiet=False)
+             file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2"
+             url = f"http://dlib.net/files/{file_name}"
+             output = f"{home}/.deepface/weights/{file_name}"
+             gdown.download(url, output, quiet=False)
-             zipfile = bz2.BZ2File(output)
-             data = zipfile.read()
-             newfilepath = output[:-4] #discard .bz2 extension
-             open(newfilepath, 'wb').write(data)
+             zipfile = bz2.BZ2File(output)
+             data = zipfile.read()
+             newfilepath = output[:-4]  # discard .bz2 extension
+             with open(newfilepath, "wb") as f:
+                 f.write(data)
-         #---------------------
+         # ---------------------
-         model = dlib.face_recognition_model_v1(weight_file)
-         self.__model = model
+         model = dlib.face_recognition_model_v1(weight_file)
+         self.__model = model
-         #---------------------
+         # ---------------------
-         return None #classes must return None
+         # return None # classes must return None
-     def predict(self, img_aligned):
+     def predict(self, img_aligned):
-         #functions.detectFace returns 4 dimensional images
-         if len(img_aligned.shape) == 4:
-             img_aligned = img_aligned[0]
+         # functions.detectFace returns 4 dimensional images
+         if len(img_aligned.shape) == 4:
+             img_aligned = img_aligned[0]
-         #functions.detectFace returns bgr images
-         img_aligned = img_aligned[:,:,::-1] #bgr to rgb
+         # functions.detectFace returns bgr images
+         img_aligned = img_aligned[:, :, ::-1]  # bgr to rgb
-         #deepface.detectFace returns an array in scale of [0, 1] but dlib expects in scale of [0, 255]
-         if img_aligned.max() <= 1:
-             img_aligned = img_aligned * 255
+         # deepface.detectFace returns an array in scale of [0, 1]
+         # but dlib expects in scale of [0, 255]
+         if img_aligned.max() <= 1:
+             img_aligned = img_aligned * 255
-         img_aligned = img_aligned.astype(np.uint8)
+         img_aligned = img_aligned.astype(np.uint8)
-         model = self.__model
+         model = self.__model
-         img_representation = model.compute_face_descriptor(img_aligned)
+         img_representation = model.compute_face_descriptor(img_aligned)
-         img_representation = np.array(img_representation)
-         img_representation = np.expand_dims(img_representation, axis = 0)
+         img_representation = np.array(img_representation)
+         img_representation = np.expand_dims(img_representation, axis=0)
-         return img_representation
+         return img_representation
class DlibMetaData:
-     def __init__(self):
-         self.input_shape = [[1, 150, 150, 3]]
+     def __init__(self):
+         self.input_shape = [[1, 150, 150, 3]]

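A usage sketch for the dlib backend; it needs the optional dlib package, and predict itself performs the BGR-to-RGB flip and the [0, 1] to [0, 255] rescaling shown above:

# hypothetical usage: the dlib ResNet returns a 128-d face descriptor
import numpy as np
from deepface.basemodels.DlibResNet import DlibResNet

model = DlibResNet()  # downloads and unpacks the .dat weights on first call
face = np.random.rand(1, 150, 150, 3).astype("float32")  # placeholder, BGR, [0, 1]
descriptor = model.predict(face)  # shape: (1, 128)
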
deepface/basemodels/DlibWrapper.py

@@ -1,4 +1,5 @@
from deepface.basemodels.DlibResNet import DlibResNet
def loadModel():
-     return DlibResNet()
+     return DlibResNet()

File diff suppressed because it is too large.

deepface/basemodels/Facenet512.py

@@ -1,28 +1,29 @@
- from deepface.basemodels import Facenet
- from pathlib import Path
import os
import gdown
+ from deepface.basemodels import Facenet
from deepface.commons import functions
- def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5'):
-     model = Facenet.InceptionResNetV2(dimension = 512)
+ def loadModel(
+     url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
+ ):
-     #-------------------------
+     model = Facenet.InceptionResNetV2(dimension=512)
+     # -------------------------
    home = functions.get_deepface_home()
-     if os.path.isfile(home+'/.deepface/weights/facenet512_weights.h5') != True:
+     if os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5") != True:
        print("facenet512_weights.h5 will be downloaded...")
-         output = home+'/.deepface/weights/facenet512_weights.h5'
+         output = home + "/.deepface/weights/facenet512_weights.h5"
        gdown.download(url, output, quiet=False)
-     #-------------------------
+     # -------------------------
-     model.load_weights(home+'/.deepface/weights/facenet512_weights.h5')
+     model.load_weights(home + "/.deepface/weights/facenet512_weights.h5")
-     #-------------------------
+     # -------------------------
    return model

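Facenet512 repeats the same download-if-missing dance as the other loaders. Distilled into one sketch; get_weights is a hypothetical helper, not part of the codebase, and the file name and URL are whatever the caller passes:

# a sketch of the weight-fetching pattern shared by these loaders
import os
import gdown
from deepface.commons import functions

def get_weights(file_name, url):
    output = functions.get_deepface_home() + "/.deepface/weights/" + file_name
    if not os.path.isfile(output):  # download only on first use
        gdown.download(url, output, quiet=False)
    return output
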
deepface/basemodels/FbDeepFace.py

@@ -1,47 +1,75 @@
import os
- from pathlib import Path
- import gdown
import zipfile
- from tensorflow import keras
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import Convolution2D, LocallyConnected2D, MaxPooling2D, Flatten, Dense, Dropout
+ import gdown
+ import tensorflow as tf
from deepface.commons import functions
- #-------------------------------------
+ # --------------------------------
+ # dependency configuration
+ tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
+ if tf_version == 1:
+     from keras.models import Model, Sequential
+     from keras.layers import (
+         Convolution2D,
+         LocallyConnected2D,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+ else:
+     from tensorflow.keras.models import Model, Sequential
+     from tensorflow.keras.layers import (
+         Convolution2D,
+         LocallyConnected2D,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+ # -------------------------------------
+ # pylint: disable=line-too-long
- def loadModel(url = 'https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'):
-     base_model = Sequential()
-     base_model.add(Convolution2D(32, (11, 11), activation='relu', name='C1', input_shape=(152, 152, 3)))
-     base_model.add(MaxPooling2D(pool_size=3, strides=2, padding='same', name='M2'))
-     base_model.add(Convolution2D(16, (9, 9), activation='relu', name='C3'))
-     base_model.add(LocallyConnected2D(16, (9, 9), activation='relu', name='L4'))
-     base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation='relu', name='L5') )
-     base_model.add(LocallyConnected2D(16, (5, 5), activation='relu', name='L6'))
-     base_model.add(Flatten(name='F0'))
-     base_model.add(Dense(4096, activation='relu', name='F7'))
-     base_model.add(Dropout(rate=0.5, name='D0'))
-     base_model.add(Dense(8631, activation='softmax', name='F8'))
+ def loadModel(
+     url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
+ ):
+     base_model = Sequential()
+     base_model.add(
+         Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
+     )
+     base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
+     base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
+     base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
+     base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
+     base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
+     base_model.add(Flatten(name="F0"))
+     base_model.add(Dense(4096, activation="relu", name="F7"))
+     base_model.add(Dropout(rate=0.5, name="D0"))
+     base_model.add(Dense(8631, activation="softmax", name="F8"))
-     #---------------------------------
+     # ---------------------------------
-     home = functions.get_deepface_home()
+     home = functions.get_deepface_home()
-     if os.path.isfile(home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5') != True:
-         print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
+     if os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5") != True:
+         print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
-         output = home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'
-         gdown.download(url, output, quiet=False)
+         output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
+         gdown.download(url, output, quiet=False)
-         #unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
-         with zipfile.ZipFile(output, 'r') as zip_ref:
-             zip_ref.extractall(home+'/.deepface/weights/')
+         # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
+         with zipfile.ZipFile(output, "r") as zip_ref:
+             zip_ref.extractall(home + "/.deepface/weights/")
-     base_model.load_weights(home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5')
+     base_model.load_weights(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5")
-     #drop F8 and D0. F7 is the representation layer.
-     deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
+     # drop F8 and D0. F7 is the representation layer.
+     deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
-     return deepface_model
+     return deepface_model

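A usage sketch; because the returned model ends at F7, callers get the 4096-d representation rather than the 8631-way classifier output:

# hypothetical usage: FbDeepFace maps a 152x152 RGB crop to a 4096-d representation
import numpy as np
from deepface.basemodels import FbDeepFace

model = FbDeepFace.loadModel()  # downloads and unzips the weights on first call
face = np.random.rand(1, 152, 152, 3).astype("float32")  # placeholder input
representation = model.predict(face)  # shape: (1, 4096), the F7 layer
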
deepface/basemodels/OpenFace.py

@ -1,251 +1,376 @@
import os
from pathlib import Path
import gdown
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from deepface.commons import functions
#---------------------------------------
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model
from keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
from keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from keras.layers import MaxPooling2D, AveragePooling2D
from keras import backend as K
else:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras import backend as K
#url = 'https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn'
# pylint: disable=unnecessary-lambda
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5'):
myInput = Input(shape=(96, 96, 3))
# ---------------------------------------
x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
x = Activation('relu')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
x = Conv2D(64, (1, 1), name='conv2')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
x = Activation('relu')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(192, (3, 3), name='conv3')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
x = Activation('relu')(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x) #x is equal added
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
# Inception3a
inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
):
myInput = Input(shape=(96, 96, 3))
inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
x = Conv2D(64, (1, 1), name="conv2")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(192, (3, 3), name="conv3")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
x = Activation("relu")(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(x) # x is equal added
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
inception_3a_pool = Activation('relu')(inception_3a_pool)
inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
# Inception3a
inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1")(
inception_3a_3x3
)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(inception_3a_3x3)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2")(
inception_3a_3x3
)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
inception_3a_1x1 = Activation('relu')(inception_3a_1x1)
inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1")(
inception_3a_5x5
)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(inception_3a_5x5)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2")(
inception_3a_5x5
)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)
inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(inception_3a_pool)
inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_pool_bn")(
inception_3a_pool
)
inception_3a_pool = Activation("relu")(inception_3a_pool)
inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
# Inception3b
inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_1x1_bn")(
inception_3a_1x1
)
inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3a = concatenate(
[inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3
)
inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x*9, name='mult9_3b')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_3b')(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
inception_3b_pool = Activation('relu')(inception_3b_pool)
inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
# Inception3b
inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1")(
inception_3b_3x3
)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(inception_3b_3x3)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2")(
inception_3b_3x3
)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
inception_3b_1x1 = Activation('relu')(inception_3b_1x1)
inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1")(
inception_3b_5x5
)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2")(
inception_3b_5x5
)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)
inception_3b_pool = Lambda(lambda x: x**2, name="power2_3b")(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(inception_3b_pool)
inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_pool_bn")(
inception_3b_pool
)
inception_3b_pool = Activation("relu")(inception_3b_pool)
inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
# Inception3c
inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name='inception_3c_3x3_conv1')(inception_3b)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name='inception_3c_3x3_conv'+'2')(inception_3c_3x3)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn'+'2')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_1x1_bn")(
inception_3b_1x1
)
inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name='inception_3c_5x5_conv1')(inception_3b)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name='inception_3c_5x5_conv'+'2')(inception_3c_5x5)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn'+'2')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3b = concatenate(
[inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3
)
inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
# Inception3c
inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1")(
inception_3b
)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1")(
inception_3c_3x3
)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2")(
inception_3c_3x3
)
inception_3c_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
)(inception_3c_3x3)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1")(
inception_3b
)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1")(
inception_3c_5x5
)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2")(
inception_3c_5x5
)
inception_3c_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
)(inception_3c_5x5)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
# inception 4a
inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1")(
inception_3c
)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2")(
inception_4a_3x3
)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1")(
inception_3c
)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1")(
inception_4a_5x5
)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
inception_4a_5x5 = Conv2D(64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2")(
inception_4a_5x5
)
inception_4a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
)(inception_4a_5x5)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_pool = Lambda(lambda x: x**2, name="power2_4a")(inception_3c)
inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
inception_4a_pool = Conv2D(128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + "")(
inception_4a_pool
)
inception_4a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
)(inception_4a_pool)
inception_4a_pool = Activation("relu")(inception_4a_pool)
inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + "")(
inception_3c
)
inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + "")(
inception_4a_1x1
)
inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
inception_4a = concatenate(
[inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3
)
# inception4e
inception_4e_3x3 = Conv2D(160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1")(
inception_4a
)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
inception_4e_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2")(
inception_4e_3x3
)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_5x5 = Conv2D(64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1")(
inception_4a
)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
inception_4e_5x5 = Conv2D(128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2")(
inception_4e_5x5
)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
# inception5a
inception_5a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1")(
inception_4e
)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
inception_5a_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2")(
inception_5a_3x3
)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_pool = Lambda(lambda x: x**2, name="power2_5a")(inception_4e)
inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
inception_5a_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + "")(
inception_5a_pool
)
inception_5a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
)(inception_5a_pool)
inception_5a_pool = Activation("relu")(inception_5a_pool)
inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
inception_5a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + "")(
inception_4e
)
inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + "")(
inception_5a_1x1
)
inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
# inception_5b
inception_5b_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1")(
inception_5a
)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
inception_5b_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2")(
inception_5b_3x3
)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
inception_5b_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + "")(
inception_5b_pool
)
inception_5b_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
)(inception_5b_pool)
inception_5b_pool = Activation("relu")(inception_5b_pool)
inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
inception_5b_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + "")(
inception_5a
)
inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + "")(
inception_5b_1x1
)
inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
reshape_layer = Flatten()(av_pool)
dense_layer = Dense(128, name="dense_layer")(reshape_layer)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(dense_layer)
# Final Model
model = Model(inputs=[myInput], outputs=norm_layer)
# -----------------------------------
home = functions.get_deepface_home()
if os.path.isfile(home + "/.deepface/weights/openface_weights.h5") != True:
print("openface_weights.h5 will be downloaded...")
output = home + "/.deepface/weights/openface_weights.h5"
gdown.download(url, output, quiet=False)
# -----------------------------------
model.load_weights(home + "/.deepface/weights/openface_weights.h5")
# -----------------------------------
return model
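For orientation, a minimal usage sketch of the model built above (assuming this function is exposed as deepface.basemodels.OpenFace.loadModel, matching the other basemodels in this commit): the returned Keras model maps a 96x96 RGB face crop scaled to [0, 1] to a 128-dimensional L2-normalized embedding.

import numpy as np
from deepface.basemodels import OpenFace

model = OpenFace.loadModel()  # fetches openface_weights.h5 on first call
face = np.random.rand(1, 96, 96, 3).astype("float32")  # stand-in for a preprocessed face crop
embedding = model.predict(face)[0]
print(embedding.shape)  # (128,)
print(np.linalg.norm(embedding))  # ~1.0, guaranteed by norm_layer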

deepface/basemodels/SFace.py

import os
import numpy as np
import cv2 as cv
import gdown
from deepface.commons import functions
# pylint: disable=line-too-long, too-few-public-methods
class _Layer:
input_shape = (None, 112, 112, 3)
output_shape = (None, 1, 128)
class SFaceModel:
def __init__(self, model_path):
self.model = cv.FaceRecognizerSF.create(
model=model_path, config="", backend_id=0, target_id=0
)
self.layers = [_Layer()]
def predict(self, image):
# Preprocess
input_blob = (image[0] * 255).astype(
np.uint8
) # revert the image to original format and preprocess using the model
# Forward
embeddings = self.model.feature(input_blob)
return embeddings
def load_model(
url="https://github.com/opencv/opencv_zoo/raw/master/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
):
home = functions.get_deepface_home()
file_name = home + "/.deepface/weights/face_recognition_sface_2021dec.onnx"
if not os.path.isfile(file_name):
gdown.download(url, file_name, quiet=False)
model = SFaceModel(model_path=file_name)
return model
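A hedged usage sketch of the wrapper above: load_model() returns an SFaceModel whose predict mimics the Keras API, taking a batch of one 112x112 face scaled to [0, 1] (predict reverts it to uint8 internally before handing it to OpenCV). The module path is assumed to be deepface.basemodels.SFace.

import numpy as np
from deepface.basemodels import SFace

model = SFace.load_model()  # fetches the .onnx weights on first call
face = np.random.rand(1, 112, 112, 3).astype("float32")  # stand-in for a preprocessed face crop
embedding = model.predict(face)
print(embedding.shape)  # (1, 128), matching _Layer.output_shape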

deepface/basemodels/VGGFace.py

import os
import gdown
import tensorflow as tf
from deepface.commons import functions
# ---------------------------------------
tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import (
Convolution2D,
ZeroPadding2D,
MaxPooling2D,
Flatten,
Dropout,
Activation,
)
else:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
Convolution2D,
ZeroPadding2D,
MaxPooling2D,
Flatten,
Dropout,
Activation,
)
# ---------------------------------------
def baseModel():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Convolution2D(4096, (7, 7), activation="relu"))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation="relu"))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation("softmax"))
return model
# url = 'https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo'
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
):
model = baseModel()
# -----------------------------------
home = functions.get_deepface_home()
output = home + "/.deepface/weights/vgg_face_weights.h5"
if os.path.isfile(output) != True:
print("vgg_face_weights.h5 will be downloaded...")
gdown.download(url, output, quiet=False)
# -----------------------------------
model.load_weights(output)
# -----------------------------------
# TO-DO: why?
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
return vgg_face_descriptor
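The design choice behind the last two lines: verification needs an embedding rather than class scores, so loadModel returns a truncated descriptor model ending at the flattened 2622-d layer just before the softmax. A minimal sketch (module path assumed to be deepface.basemodels.VGGFace):

import numpy as np
from deepface.basemodels import VGGFace

model = VGGFace.loadModel()  # fetches vgg_face_weights.h5 on first call
face = np.random.rand(1, 224, 224, 3).astype("float32")  # stand-in for a preprocessed face crop
print(model.predict(face).shape)  # (1, 2622)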

deepface/commons/distance.py

import numpy as np
def findCosineDistance(source_representation, test_representation):
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
if isinstance(source_representation, list):
source_representation = np.array(source_representation)
if isinstance(test_representation, list):
test_representation = np.array(test_representation)
euclidean_distance = source_representation - test_representation
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
def l2_normalize(x):
return x / np.sqrt(np.sum(np.multiply(x, x)))
def findThreshold(model_name, distance_metric):
base_threshold = {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75}
thresholds = {
"VGG-Face": {"cosine": 0.40, "euclidean": 0.60, "euclidean_l2": 0.86},
"Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80},
"Facenet512": {"cosine": 0.30, "euclidean": 23.56, "euclidean_l2": 1.04},
"ArcFace": {"cosine": 0.68, "euclidean": 4.15, "euclidean_l2": 1.13},
"Dlib": {"cosine": 0.07, "euclidean": 0.6, "euclidean_l2": 0.4},
"SFace": {"cosine": 0.593, "euclidean": 10.734, "euclidean_l2": 1.055},
"OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55},
"DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64},
"DeepID": {"cosine": 0.015, "euclidean": 45, "euclidean_l2": 0.17},
}
threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
return threshold
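A hedged example of how these helpers compose (module path assumed to be deepface.commons.distance): two embeddings count as the same identity when their distance falls under the model- and metric-specific threshold.

import numpy as np
from deepface.commons import distance as dst

alpha = np.array([0.1, 0.3, 0.2])
beta = np.array([0.09, 0.29, 0.21])

d = dst.findCosineDistance(alpha, beta)
threshold = dst.findThreshold("VGG-Face", "cosine")  # 0.40
print(f"distance={d:.4f}, same identity: {d <= threshold}")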

deepface/commons/functions.py

import os
import base64
from pathlib import Path
from PIL import Image
import requests
# 3rd party dependencies
import numpy as np
import cv2
import tensorflow as tf
# package dependencies
from deepface.detectors import FaceDetector
# --------------------------------------------------
# configurations of dependencies
tf_version = tf.__version__
tf_major_version = int(tf_version.split(".", maxsplit=1)[0])
tf_minor_version = int(tf_version.split(".")[1])
if tf_major_version == 1:
import keras
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
elif tf_major_version == 2:
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.preprocessing import image
# --------------------------------------------------
def initialize_folder():
home = get_deepface_home()
if not os.path.exists(home + "/.deepface"):
os.makedirs(home + "/.deepface")
print("Directory ", home, "/.deepface created")
if not os.path.exists(home + "/.deepface/weights"):
os.makedirs(home + "/.deepface/weights")
print("Directory ", home, "/.deepface/weights created")
def get_deepface_home():
return str(os.getenv("DEEPFACE_HOME", default=str(Path.home())))
# --------------------------------------------------
def loadBase64Img(uri):
encoded_data = uri.split(",")[1]
nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
def load_image(img):
exact_image = False
base64_img = False
url_img = False
if type(img).__module__ == np.__name__:
exact_image = True
elif len(img) > 11 and img[0:11] == "data:image/":
base64_img = True
elif len(img) > 11 and img.startswith("http"):
url_img = True
# ---------------------------
if base64_img is True:
img = loadBase64Img(img)
elif url_img is True:
img = np.array(Image.open(requests.get(img, stream=True).raw).convert("RGB"))
elif exact_image is not True: # image path passed as input
if os.path.isfile(img) is not True:
raise ValueError(f"Confirm that {img} exists")
img = cv2.imread(img)
return img
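load_image therefore accepts four input forms transparently; a short sketch (the file path is a hypothetical placeholder):

import numpy as np
from deepface.commons import functions

img_a = functions.load_image(np.zeros((100, 100, 3), dtype=np.uint8))  # numpy array, returned as-is
img_b = functions.load_image("tests/dataset/img1.jpg")  # file path, read with cv2
# base64 data URIs ("data:image/...") and http(s) URLs are handled by the branches above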
# --------------------------------------------------
def extract_faces(
img,
target_size=(224, 224),
detector_backend="opencv",
grayscale=False,
enforce_detection=True,
align=True,
):
# this is going to store a list of img itself (numpy), it region and confidence
extracted_faces = []
# img might be path, base64 or numpy array. Convert it to numpy whatever it is.
img = load_image(img)
img_region = [0, 0, img.shape[1], img.shape[0]]
if detector_backend == "skip":
face_objs = [(img, img_region, 0)]
else:
face_detector = FaceDetector.build_model(detector_backend)
face_objs = FaceDetector.detect_faces(face_detector, detector_backend, img, align)
# in case of no face found
if len(face_objs) == 0 and enforce_detection is True:
raise ValueError(
"Face could not be detected. Please confirm that the picture is a face photo "
+ "or consider to set enforce_detection param to False."
)
if len(face_objs) == 0 and enforce_detection is False:
face_objs = [(img, img_region, 0)]
for current_img, current_region, confidence in face_objs:
if current_img.shape[0] > 0 and current_img.shape[1] > 0:
if grayscale is True:
current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
# resize and padding
if current_img.shape[0] > 0 and current_img.shape[1] > 0:
factor_0 = target_size[0] / current_img.shape[0]
factor_1 = target_size[1] / current_img.shape[1]
factor = min(factor_0, factor_1)
dsize = (int(current_img.shape[1] * factor), int(current_img.shape[0] * factor))
current_img = cv2.resize(current_img, dsize)
diff_0 = target_size[0] - current_img.shape[0]
diff_1 = target_size[1] - current_img.shape[1]
if grayscale is False:
# Put the base image in the middle of the padded image
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
(0, 0),
),
"constant",
)
else:
current_img = np.pad(
current_img,
((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)),
"constant",
)
# double check: if target image is not still the same size with target.
if current_img.shape[0:2] != target_size:
current_img = cv2.resize(current_img, target_size)
# normalizing the image pixels
img_pixels = image.img_to_array(current_img) # what this line doing? must?
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255 # normalize input in [0, 1]
# int cast is for the exception - object of type 'float32' is not JSON serializable
region_obj = {
"x": int(current_region[0]),
"y": int(current_region[1]),
"w": int(current_region[2]),
"h": int(current_region[3]),
}
extracted_face = [img_pixels, region_obj, confidence]
extracted_faces.append(extracted_face)
if len(extracted_faces) == 0 and enforce_detection == True:
raise ValueError(
"Detected face shape is ",
img.shape,
". Consider to set enforce_detection argument to False.",
)
return extracted_faces
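extract_faces is the preprocessing entry point the rest of the library leans on; a hedged sketch of a typical call (the image path is a placeholder):

from deepface.commons import functions

face_objs = functions.extract_faces("img.jpg", target_size=(224, 224), detector_backend="opencv")
img_pixels, region, confidence = face_objs[0]
print(img_pixels.shape)  # (1, 224, 224, 3), scaled to [0, 1]
print(region)  # {'x': ..., 'y': ..., 'w': ..., 'h': ...}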
def normalize_input(img, normalization="base"):
# issue 131 declares that some normalization techniques improves the accuracy
if normalization == "base":
return img
# @trevorgribble and @davedgd contributed this feature
# restore input in scale of [0, 255] because it was normalized in scale of
# [0, 1] in preprocess_face
img *= 255
if normalization == "raw":
pass # return just restored pixels
elif normalization == "Facenet":
mean, std = img.mean(), img.std()
img = (img - mean) / std
elif normalization == "Facenet2018":
# simply / 127.5 - 1 (similar to facenet 2018 model preprocessing step as @iamrishab posted)
img /= 127.5
img -= 1
elif normalization == "VGGFace":
# mean subtraction based on VGGFace1 training data
img[..., 0] -= 93.5940
img[..., 1] -= 104.7624
img[..., 2] -= 129.1863
elif normalization == "VGGFace2":
# mean subtraction based on VGGFace2 training data
img[..., 0] -= 91.4953
img[..., 1] -= 103.8827
img[..., 2] -= 131.0912
elif normalization == "ArcFace":
# Reference study: The faces are cropped and resized to 112×112,
# and each pixel (ranged between [0, 255]) in RGB images is normalised
# by subtracting 127.5 then divided by 128.
img -= 127.5
img /= 128
else:
raise ValueError(f"unimplemented normalization type - {normalization}")
return img
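As a quick check of the arithmetic: the Facenet2018 branch maps the restored [0, 255] range to [-1, 1] (0/127.5 - 1 = -1 and 255/127.5 - 1 = 1), while ArcFace maps it to roughly the same interval via (x - 127.5)/128. A tiny sketch:

import numpy as np
from deepface.commons import functions

img = np.full((1, 112, 112, 3), 0.5, dtype="float32")  # mid-gray in [0, 1], as extract_faces emits
out = functions.normalize_input(img, normalization="ArcFace")
print(out.min(), out.max())  # 0.0 0.0 - mid-gray lands at the centre of ArcFace's range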
def find_target_size(model_name):
target_sizes = {
"VGG-Face": (224, 224),
"Facenet": (160, 160),
"Facenet512": (160, 160),
"OpenFace": (96, 96),
"DeepFace": (152, 152),
"DeepID": (55, 47),
"Dlib": (150, 150),
"ArcFace": (112, 112),
"SFace": (112, 112),
}
target_size = target_sizes.get(model_name)
if target_size == None:
raise ValueError(f"unimplemented model name - {model_name}")
return target_size
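find_target_size exists so callers can pair each model with the right crop size before preprocessing; the two helpers compose naturally (placeholder path):

from deepface.commons import functions

target_size = functions.find_target_size("Facenet")  # (160, 160)
face_objs = functions.extract_faces("img.jpg", target_size=target_size)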

(diff of one large file suppressed)

deepface/detectors/DlibWrapper.py

import os
import bz2
import gdown
from deepface.commons import functions
def build_model():
home = functions.get_deepface_home()
import dlib # this requirement is not a must that's why imported here
# check required file exists in the home/.deepface/weights folder
if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True:
print("shape_predictor_5_face_landmarks.dat.bz2 is going to be downloaded")
file_name = "shape_predictor_5_face_landmarks.dat.bz2"
print(f"{file_name} is going to be downloaded")
url = "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
output = home+'/.deepface/weights/'+url.split("/")[-1]
url = f"http://dlib.net/files/{file_name}"
output = f"{home}/.deepface/weights/{file_name}"
gdown.download(url, output, quiet=False)
zipfile = bz2.BZ2File(output)
data = zipfile.read()
newfilepath = output[:-4] # discard .bz2 extension
with open(newfilepath, "wb") as f:
f.write(data)
face_detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat")
detector = {}
detector["face_detector"] = face_detector
detector["sp"] = sp
return detector
def detect_face(detector, img, align=True):
import dlib # this requirement is not a must that's why imported here
resp = []
sp = detector["sp"]
detected_face = None
img_region = [0, 0, img.shape[1], img.shape[0]]
face_detector = detector["face_detector"]
# note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
detections, scores, _ = face_detector.run(img, 1)
if len(detections) > 0:
for idx, d in enumerate(detections):
left = d.left()
right = d.right()
top = d.top()
bottom = d.bottom()
# detected_face = img[top:bottom, left:right]
detected_face = img[
max(0, top) : min(bottom, img.shape[0]), max(0, left) : min(right, img.shape[1])
]
img_region = [left, top, right - left, bottom - top]
confidence = scores[idx]
if align:
img_shape = sp(img, detections[idx])
detected_face = dlib.get_face_chip(img, img_shape, size=detected_face.shape[0])
resp.append((detected_face, img_region, confidence))
return resp
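A hedged sketch of the contract every detector wrapper in this diff follows: build_model() returns whatever state detect_face needs, and detect_face yields (detected_face, region, confidence) tuples. The image path below is a placeholder.

import cv2
from deepface.detectors import DlibWrapper

detector = DlibWrapper.build_model()  # fetches the dlib shape predictor on first call
img = cv2.imread("img.jpg")  # BGR, as loaded by OpenCV
for detected_face, region, confidence in DlibWrapper.detect_face(detector, img, align=True):
    print(region, confidence)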

deepface/detectors/FaceDetector.py

import math
from PIL import Image
import numpy as np
from deepface.commons import distance
from deepface.detectors import (
OpenCvWrapper,
SsdWrapper,
DlibWrapper,
MtcnnWrapper,
RetinaFaceWrapper,
MediapipeWrapper,
)
def build_model(detector_backend):
global face_detector_obj # singleton design pattern
backends = {
"opencv": OpenCvWrapper.build_model,
"ssd": SsdWrapper.build_model,
"dlib": DlibWrapper.build_model,
"mtcnn": MtcnnWrapper.build_model,
"retinaface": RetinaFaceWrapper.build_model,
"mediapipe": MediapipeWrapper.build_model,
}
if not "face_detector_obj" in globals():
face_detector_obj = {}
built_models = list(face_detector_obj.keys())
if detector_backend not in built_models:
face_detector = backends.get(detector_backend)
if face_detector:
face_detector = face_detector()
face_detector_obj[detector_backend] = face_detector
#print(detector_backend," built")
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
return face_detector_obj[detector_backend]
def detect_face(face_detector, detector_backend, img, align=True):
obj = detect_faces(face_detector, detector_backend, img, align)
if len(obj) > 0:
face, region, confidence = obj[0] # discard multiple faces
else: # len(obj) == 0
face = None
region = [0, 0, img.shape[1], img.shape[0]]
return face, region, confidence
def detect_faces(face_detector, detector_backend, img, align=True):
backends = {
"opencv": OpenCvWrapper.detect_face,
"ssd": SsdWrapper.detect_face,
"dlib": DlibWrapper.detect_face,
"mtcnn": MtcnnWrapper.detect_face,
"retinaface": RetinaFaceWrapper.detect_face,
"mediapipe": MediapipeWrapper.detect_face,
}
detect_face_fn = backends.get(detector_backend)
if detect_face_fn: # pylint: disable=no-else-return
obj = detect_face_fn(face_detector, img, align)
# obj stores list of (detected_face, region, confidence)
return obj
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
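detect_faces is the facade the commons layer calls; together with the module-level singleton in build_model it lets a detector be constructed once and reused. A minimal sketch (placeholder image path):

import cv2
from deepface.detectors import FaceDetector

face_detector = FaceDetector.build_model("opencv")  # cached in face_detector_obj
img = cv2.imread("img.jpg")
face_objs = FaceDetector.detect_faces(face_detector, "opencv", img, align=True)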
def alignment_procedure(img, left_eye, right_eye):
# this function aligns given face in img based on left and right eye coordinates
left_eye_x, left_eye_y = left_eye
right_eye_x, right_eye_y = right_eye
# -----------------------
# find rotation direction
if left_eye_y > right_eye_y:
point_3rd = (right_eye_x, left_eye_y)
direction = -1 # rotate same direction to clock
else:
point_3rd = (left_eye_x, right_eye_y)
direction = 1 # rotate inverse direction of clock
# -----------------------
# find length of triangle edges
a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd))
b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd))
c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye))
# -----------------------
# apply cosine rule
if b != 0 and c != 0: # this multiplication causes division by zero in cos_a calculation
cos_a = (b * b + c * c - a * a) / (2 * b * c)
angle = np.arccos(cos_a) # angle in radian
angle = (angle * 180) / math.pi # radian to degree
# -----------------------
# rotate base image
if direction == -1:
angle = 90 - angle
img = Image.fromarray(img)
img = np.array(img.rotate(direction * angle))
# -----------------------
return img # return img anyway
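To make the geometry concrete, a worked example of the cosine rule above: with left_eye=(30, 40) and right_eye=(70, 30), the left eye sits lower in image coordinates, so point_3rd=(70, 40) and direction=-1; then a=40, b=10, c=sqrt(1700), and the final rotation is 90 minus the arccos result, i.e. atan(10/40) ~ 14.04 degrees clockwise, exactly what levels the eye line.

import numpy as np

a, b = 40.0, 10.0  # horizontal and vertical legs of the eye triangle
c = np.sqrt(a**2 + b**2)  # the eye line itself
angle = np.degrees(np.arccos((b * b + c * c - a * a) / (2 * b * c)))
print(90 - angle)  # ~14.04, equal to degrees(arctan(10 / 40))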

deepface/detectors/MediapipeWrapper.py

from deepface.detectors import FaceDetector
# Link - https://google.github.io/mediapipe/solutions/face_detection
def build_model():
import mediapipe as mp # this is not a must dependency. do not import it in the global level.
mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
return face_detection
def detect_face(face_detector, img, align=True):
resp = []
img_width = img.shape[1]
img_height = img.shape[0]
results = face_detector.process(img)
if results.detections:
for detection in results.detections:
(confidence,) = detection.score
bounding_box = detection.location_data.relative_bounding_box
landmarks = detection.location_data.relative_keypoints
x = int(bounding_box.xmin * img_width)
w = int(bounding_box.width * img_width)
y = int(bounding_box.ymin * img_height)
h = int(bounding_box.height * img_height)
right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
# nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
# mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
# right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
# left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
if x > 0 and y > 0:
detected_face = img[y : y + h, x : x + w]
img_region = [x, y, w, h]
if align:
detected_face = FaceDetector.alignment_procedure(
detected_face, left_eye, right_eye
)
resp.append((detected_face, img_region, confidence))
return resp
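Worth noting for readers of this wrapper: mediapipe reports relative_bounding_box and relative_keypoints in normalized [0, 1] coordinates, which is why every value is scaled by img_width or img_height before slicing. For example, a relative keypoint of (0.25, 0.5) on a 640x480 frame lands at pixel (int(0.25 * 640), int(0.5 * 480)) = (160, 240).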

deepface/detectors/MtcnnWrapper.py

import cv2
from deepface.detectors import FaceDetector
def build_model():
from mtcnn import MTCNN
face_detector = MTCNN()
return face_detector
def detect_face(face_detector, img, align=True):
resp = []
detected_face = None
img_region = [0, 0, img.shape[1], img.shape[0]]
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR
detections = face_detector.detect_faces(img_rgb)
if len(detections) > 0:
for detection in detections:
x, y, w, h = detection["box"]
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
img_region = [x, y, w, h]
confidence = detection["confidence"]
if align:
keypoints = detection["keypoints"]
left_eye = keypoints["left_eye"]
right_eye = keypoints["right_eye"]
detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
resp.append((detected_face, img_region, confidence))
return resp
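A brief usage sketch (placeholder path); note that detect_face handles the BGR-to-RGB conversion itself, so callers pass OpenCV-loaded frames directly:

import cv2
from deepface.detectors import MtcnnWrapper

detector = MtcnnWrapper.build_model()
img = cv2.imread("img.jpg")
face_objs = MtcnnWrapper.detect_face(detector, img, align=True)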

deepface/detectors/OpenCvWrapper.py

import os
import cv2
from deepface.detectors import FaceDetector
def build_model():
detector = {}
detector["face_detector"] = build_cascade("haarcascade")
detector["eye_detector"] = build_cascade("haarcascade_eye")
return detector
def build_cascade(model_name="haarcascade"):
opencv_path = get_opencv_path()
if model_name == "haarcascade":
face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
face_detector_path,
" violated.",
)
detector = cv2.CascadeClassifier(face_detector_path)
elif model_name == "haarcascade_eye":
eye_detector_path = opencv_path + "haarcascade_eye.xml"
if os.path.isfile(eye_detector_path) != True:
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
eye_detector_path,
" violated.",
)
detector = cv2.CascadeClassifier(eye_detector_path)
else:
raise ValueError(f"unimplemented model_name for build_cascade - {model_name}")
return detector
def detect_face(detector, img, align=True):
resp = []
detected_face = None
img_region = [0, 0, img.shape[1], img.shape[0]]
faces = []
try:
# faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
# note that, by design, opencv's haarcascade scores are >0 but not capped at 1
faces, _, scores = detector["face_detector"].detectMultiScale3(
img, 1.1, 10, outputRejectLevels=True
)
except:
pass
if len(faces) > 0:
for (x, y, w, h), confidence in zip(faces, scores):
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
if align:
detected_face = align_face(detector["eye_detector"], detected_face)
img_region = [x, y, w, h]
resp.append((detected_face, img_region, confidence))
return resp
def align_face(eye_detector, img):
detected_face_gray = cv2.cvtColor(
img, cv2.COLOR_BGR2GRAY
) # eye detector expects gray scale image
# eyes = eye_detector.detectMultiScale(detected_face_gray, 1.3, 5)
eyes = eye_detector.detectMultiScale(detected_face_gray, 1.1, 10)
#----------------------------------------------------------------
# ----------------------------------------------------------------
#opencv eye detectin module is not strong. it might find more than 2 eyes!
#besides, it returns eyes with different order in each call (issue 435)
#this is an important issue because opencv is the default detector and ssd also uses this
#find the largest 2 eye. Thanks to @thelostpeace
# opencv eye detectin module is not strong. it might find more than 2 eyes!
# besides, it returns eyes with different order in each call (issue 435)
# this is an important issue because opencv is the default detector and ssd also uses this
# find the largest 2 eye. Thanks to @thelostpeace
eyes = sorted(eyes, key = lambda v: abs((v[0] - v[2]) * (v[1] - v[3])), reverse=True)
eyes = sorted(eyes, key=lambda v: abs((v[0] - v[2]) * (v[1] - v[3])), reverse=True)
#----------------------------------------------------------------
# ----------------------------------------------------------------
if len(eyes) >= 2:
if len(eyes) >= 2:
#decide left and right eye
# decide left and right eye
eye_1 = eyes[0]; eye_2 = eyes[1]
eye_1 = eyes[0]
eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1; right_eye = eye_2
else:
left_eye = eye_2; right_eye = eye_1
if eye_1[0] < eye_2[0]:
left_eye = eye_1
right_eye = eye_2
else:
left_eye = eye_2
right_eye = eye_1
# -----------------------
# find center of eyes
left_eye = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
right_eye = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))
img = FaceDetector.alignment_procedure(img, left_eye, right_eye)
return img # return img anyway
#-----------------------
#find center of eyes
left_eye = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
right_eye = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
img = FaceDetector.alignment_procedure(img, left_eye, right_eye)
return img #return img anyway
def get_opencv_path():
    opencv_home = cv2.__file__
    folders = opencv_home.split(os.path.sep)[0:-1]

    path = folders[0]
    for folder in folders[1:]:
        path = path + "/" + folder

    return path + "/data/"

View File

@@ -1,42 +1,25 @@
import cv2


def build_model():
    from retinaface import RetinaFace  # this is not a must dependency

    face_detector = RetinaFace.build_model()
    return face_detector


def detect_face(face_detector, img, align=True):
    from retinaface import RetinaFace  # this is not a must dependency
    from retinaface.commons import postprocess

    # ---------------------------------

    resp = []

    # The BGR2RGB conversion will be done in the preprocessing step of retinaface.
    # img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #retinaface expects RGB but OpenCV read BGR

    # --------------------------

    obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)

    if isinstance(obj, dict):
        for identity in obj.values():  # each value is one detected face
            facial_area = identity["facial_area"]
            y = facial_area[1]
@@ -46,18 +29,20 @@ def detect_face(face_detector, img, align = True):
            img_region = [x, y, w, h]
            confidence = identity["score"]

            # detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
            detected_face = img[facial_area[1] : facial_area[3], facial_area[0] : facial_area[2]]

            if align:
                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]
                # mouth_right = landmarks["mouth_right"]
                # mouth_left = landmarks["mouth_left"]

                detected_face = postprocess.alignment_procedure(
                    detected_face, right_eye, left_eye, nose
                )

            resp.append((detected_face, img_region, confidence))
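
An illustrative note on the convention used above: RetinaFace reports facial_area as [x1, y1, x2, y2], which is why the crop slices rows with indices 1 and 3 and columns with 0 and 2; the [x, y, w, h] region then follows by subtraction (numbers made up):

# hypothetical detection output, to illustrate the facial_area convention used above
facial_area = [118, 77, 205, 190]  # [x1, y1, x2, y2]
x, y = facial_area[0], facial_area[1]
w = facial_area[2] - facial_area[0]  # width  = x2 - x1
h = facial_area[3] - facial_area[1]  # height = y2 - y1
img_region = [x, y, w, h]  # matches the [x, y, w, h] convention of the wrappers
print(img_region)  # [118, 77, 87, 113]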

View File

@@ -1,103 +1,113 @@
import os
import gdown
import cv2
import pandas as pd
from deepface.detectors import OpenCvWrapper
from deepface.commons import functions

# pylint: disable=line-too-long


def build_model():

    home = functions.get_deepface_home()

    # model structure
    if os.path.isfile(home + "/.deepface/weights/deploy.prototxt") != True:

        print("deploy.prototxt will be downloaded...")

        url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"

        output = home + "/.deepface/weights/deploy.prototxt"

        gdown.download(url, output, quiet=False)

    # pre-trained weights
    if os.path.isfile(home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel") != True:

        print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")

        url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"

        output = home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"

        gdown.download(url, output, quiet=False)

    face_detector = cv2.dnn.readNetFromCaffe(
        home + "/.deepface/weights/deploy.prototxt",
        home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel",
    )

    eye_detector = OpenCvWrapper.build_cascade("haarcascade_eye")

    detector = {}
    detector["face_detector"] = face_detector
    detector["eye_detector"] = eye_detector

    return detector


def detect_face(detector, img, align=True):

    resp = []

    detected_face = None
    img_region = [0, 0, img.shape[1], img.shape[0]]

    ssd_labels = ["img_id", "is_face", "confidence", "left", "top", "right", "bottom"]

    target_size = (300, 300)

    base_img = img.copy()  # we will restore base_img to img later

    original_size = img.shape

    img = cv2.resize(img, target_size)

    aspect_ratio_x = original_size[1] / target_size[1]
    aspect_ratio_y = original_size[0] / target_size[0]

    imageBlob = cv2.dnn.blobFromImage(image=img)

    face_detector = detector["face_detector"]
    face_detector.setInput(imageBlob)
    detections = face_detector.forward()

    detections_df = pd.DataFrame(detections[0][0], columns=ssd_labels)

    detections_df = detections_df[detections_df["is_face"] == 1]  # 0: background, 1: face
    detections_df = detections_df[detections_df["confidence"] >= 0.90]

    detections_df["left"] = (detections_df["left"] * 300).astype(int)
    detections_df["bottom"] = (detections_df["bottom"] * 300).astype(int)
    detections_df["right"] = (detections_df["right"] * 300).astype(int)
    detections_df["top"] = (detections_df["top"] * 300).astype(int)

    if detections_df.shape[0] > 0:

        for _, instance in detections_df.iterrows():

            left = instance["left"]
            right = instance["right"]
            bottom = instance["bottom"]
            top = instance["top"]

            detected_face = base_img[
                int(top * aspect_ratio_y) : int(bottom * aspect_ratio_y),
                int(left * aspect_ratio_x) : int(right * aspect_ratio_x),
            ]
            img_region = [
                int(left * aspect_ratio_x),
                int(top * aspect_ratio_y),
                int(right * aspect_ratio_x) - int(left * aspect_ratio_x),
                int(bottom * aspect_ratio_y) - int(top * aspect_ratio_y),
            ]
            confidence = instance["confidence"]

            if align:
                detected_face = OpenCvWrapper.align_face(detector["eye_detector"], detected_face)

            resp.append((detected_face, img_region, confidence))

    return resp
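
To make the coordinate mapping above concrete, a sketch with made-up numbers: boxes are predicted on the 300x300 resized frame, then scaled back to the original resolution with the two aspect ratios:

# made-up numbers illustrating the SSD coordinate mapping used above
original_size = (720, 1280)  # (height, width) of the input frame
target_size = (300, 300)

aspect_ratio_x = original_size[1] / target_size[1]  # 1280 / 300 ~ 4.267
aspect_ratio_y = original_size[0] / target_size[0]  # 720 / 300 = 2.4

left, top, right, bottom = 90, 60, 150, 150  # hypothetical box on the 300x300 image
img_region = [
    int(left * aspect_ratio_x),  # 384
    int(top * aspect_ratio_y),  # 144
    int(right * aspect_ratio_x) - int(left * aspect_ratio_x),  # 640 - 384 = 256
    int(bottom * aspect_ratio_y) - int(top * aspect_ratio_y),  # 360 - 144 = 216
]
print(img_region)  # [384, 144, 256, 216]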

View File

@@ -1,60 +1,63 @@
import os
import gdown
import numpy as np
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions

# ----------------------------------------
# dependency configurations

tf_version = int(tf.__version__.split(".", maxsplit=1)[0])

if tf_version == 1:
    import keras
    from keras.models import Model, Sequential
    from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
    from tensorflow import keras
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

# ----------------------------------------


def loadModel(
    url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
):

    model = VGGFace.baseModel()

    # --------------------------

    classes = 101
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation("softmax")(base_model_output)

    # --------------------------

    age_model = Model(inputs=model.input, outputs=base_model_output)

    # --------------------------

    # load weights

    home = functions.get_deepface_home()

    if os.path.isfile(home + "/.deepface/weights/age_model_weights.h5") != True:
        print("age_model_weights.h5 will be downloaded...")

        output = home + "/.deepface/weights/age_model_weights.h5"
        gdown.download(url, output, quiet=False)

    age_model.load_weights(home + "/.deepface/weights/age_model_weights.h5")

    return age_model

    # --------------------------


def findApparentAge(age_predictions):
    output_indexes = np.array(list(range(0, 101)))
    apparent_age = np.sum(age_predictions * output_indexes)
    return apparent_age
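
findApparentAge returns the expectation of the 101-bin softmax output rather than its argmax; a toy check with illustrative (non-model) values:

import numpy as np

# toy distribution concentrated around ages 30-32 (not real model output)
age_predictions = np.zeros(101)
age_predictions[30] = 0.2
age_predictions[31] = 0.5
age_predictions[32] = 0.3

output_indexes = np.array(list(range(0, 101)))
apparent_age = np.sum(age_predictions * output_indexes)
print(apparent_age)  # ~31.1, a weighted average, smoother than the argmax of 31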

View File

@@ -1,74 +1,72 @@
import os
import gdown
import zipfile
import tensorflow as tf
from deepface.commons import functions

# -------------------------------------------
# pylint: disable=line-too-long
# -------------------------------------------

# dependency configuration

tf_version = int(tf.__version__.split(".", maxsplit=1)[0])

if tf_version == 1:
    import keras
    from keras.models import Sequential
    from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
elif tf_version == 2:
    from tensorflow import keras
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import (
        Conv2D,
        MaxPooling2D,
        AveragePooling2D,
        Flatten,
        Dense,
        Dropout,
    )

# -------------------------------------------


def loadModel(
    url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
):

    num_classes = 7

    model = Sequential()

    # 1st convolution layer
    model.add(Conv2D(64, (5, 5), activation="relu", input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))

    # 2nd convolution layer
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    # 3rd convolution layer
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())

    # fully connected neural networks
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation="softmax"))

    # ----------------------------

    home = functions.get_deepface_home()

    if os.path.isfile(home + "/.deepface/weights/facial_expression_model_weights.h5") != True:
        print("facial_expression_model_weights.h5 will be downloaded...")

        output = home + "/.deepface/weights/facial_expression_model_weights.h5"
        gdown.download(url, output, quiet=False)

    model.load_weights(home + "/.deepface/weights/facial_expression_model_weights.h5")

    return model
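
A usage sketch for the network above (assuming this module is deepface.extendedmodels.Emotion and the weight download succeeds): inputs are 48x48 single-channel crops, outputs are seven softmax scores:

import numpy as np
from deepface.extendedmodels import Emotion

model = Emotion.loadModel()
dummy = np.random.rand(1, 48, 48, 1).astype("float32")  # random stand-in for a face crop
probs = model.predict(dummy)[0]  # seven emotion probabilities
print(probs.shape)  # (7,)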

View File

@@ -1,54 +1,55 @@
import os
import gdown
import numpy as np
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions

# -------------------------------------
# pylint: disable=line-too-long
# -------------------------------------

# dependency configurations

tf_version = int(tf.__version__.split(".")[0])

if tf_version == 1:
    from keras.models import Model, Sequential
    from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

# -------------------------------------


def loadModel(
    url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
):

    model = VGGFace.baseModel()

    # --------------------------

    classes = 2
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation("softmax")(base_model_output)

    # --------------------------

    gender_model = Model(inputs=model.input, outputs=base_model_output)

    # --------------------------

    # load weights

    home = functions.get_deepface_home()

    if os.path.isfile(home + "/.deepface/weights/gender_model_weights.h5") != True:
        print("gender_model_weights.h5 will be downloaded...")

        output = home + "/.deepface/weights/gender_model_weights.h5"
        gdown.download(url, output, quiet=False)

    gender_model.load_weights(home + "/.deepface/weights/gender_model_weights.h5")

    return gender_model

    # --------------------------

View File

@@ -1,65 +1,52 @@
import os
import gdown
import numpy as np
import zipfile
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions

# --------------------------
# pylint: disable=line-too-long
# --------------------------

# dependency configurations

tf_version = int(tf.__version__.split(".", maxsplit=1)[0])

if tf_version == 1:
    from keras.models import Model, Sequential
    from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Convolution2D, Flatten, Activation

# --------------------------


def loadModel(
    url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
):

    model = VGGFace.baseModel()

    # --------------------------

    classes = 6
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation("softmax")(base_model_output)

    # --------------------------

    race_model = Model(inputs=model.input, outputs=base_model_output)

    # --------------------------

    # load weights

    home = functions.get_deepface_home()

    if os.path.isfile(home + "/.deepface/weights/race_model_single_batch.h5") != True:
        print("race_model_single_batch.h5 will be downloaded...")

        output = home + "/.deepface/weights/race_model_single_batch.h5"
        gdown.download(url, output, quiet=False)

    race_model.load_weights(home + "/.deepface/weights/race_model_single_batch.h5")

    return race_model
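
Age, Gender, and Race all reuse the same transfer-learning pattern visible in these three files: take VGGFace.baseModel() up to layers[-4], then attach a 1x1 convolution head sized to the class count. A condensed sketch of that shared pattern (build_head is an illustrative name, not a deepface function):

# condensed sketch of the shared head-building pattern above (illustrative)
from deepface.basemodels import VGGFace
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Convolution2D, Flatten, Activation

def build_head(classes):
    base = VGGFace.baseModel()
    x = Convolution2D(classes, (1, 1), name="predictions")(base.layers[-4].output)
    x = Flatten()(x)
    x = Activation("softmax")(x)
    return Model(inputs=base.input, outputs=x)

age_like = build_head(101)  # Age
gender_like = build_head(2)  # Gender
race_like = build_head(6)  # Race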

View File

@@ -1,266 +0,0 @@
import itertools
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
from tqdm import tqdm

tqdm.pandas()

# --------------------------
# Data set

# Ref: https://github.com/serengil/deepface/tree/master/tests/dataset
identities = {
    "Angelina": ["img1.jpg", "img2.jpg", "img4.jpg", "img5.jpg", "img6.jpg", "img7.jpg", "img10.jpg", "img11.jpg"],
    "Scarlett": ["img8.jpg", "img9.jpg", "img47.jpg", "img48.jpg", "img49.jpg", "img50.jpg", "img51.jpg"],
    "Jennifer": ["img3.jpg", "img12.jpg", "img53.jpg", "img54.jpg", "img55.jpg", "img56.jpg"],
    "Mark": ["img13.jpg", "img14.jpg", "img15.jpg", "img57.jpg", "img58.jpg"],
    "Jack": ["img16.jpg", "img17.jpg", "img59.jpg", "img61.jpg", "img62.jpg"],
    "Elon": ["img18.jpg", "img19.jpg", "img67.jpg"],
    "Jeff": ["img20.jpg", "img21.jpg"],
    "Marissa": ["img22.jpg", "img23.jpg"],
    "Sundar": ["img24.jpg", "img25.jpg"],
    "Katy": ["img26.jpg", "img27.jpg", "img28.jpg", "img42.jpg", "img43.jpg", "img44.jpg", "img45.jpg", "img46.jpg"],
    "Matt": ["img29.jpg", "img30.jpg", "img31.jpg", "img32.jpg", "img33.jpg"],
    "Leonardo": ["img34.jpg", "img35.jpg", "img36.jpg", "img37.jpg"],
    "George": ["img38.jpg", "img39.jpg", "img40.jpg", "img41.jpg"],
}

# --------------------------
# Positives: every unordered pair of images of the same person

positives = []
for key, values in identities.items():
    for i in range(0, len(values) - 1):
        for j in range(i + 1, len(values)):
            positive = []
            positive.append(values[i])
            positive.append(values[j])
            positives.append(positive)

positives = pd.DataFrame(positives, columns=["file_x", "file_y"])
positives["decision"] = "Yes"
print(positives.shape)

# --------------------------
# Negatives: cross products of images of different people

samples_list = list(identities.values())

negatives = []
for i in range(0, len(identities) - 1):
    for j in range(i + 1, len(identities)):
        cross_product = itertools.product(samples_list[i], samples_list[j])
        cross_product = list(cross_product)
        for cross_sample in cross_product:
            negative = []
            negative.append(cross_sample[0])
            negative.append(cross_sample[1])
            negatives.append(negative)

negatives = pd.DataFrame(negatives, columns=["file_x", "file_y"])
negatives["decision"] = "No"
negatives = negatives.sample(positives.shape[0])  # balance the classes
print(negatives.shape)

# --------------------------
# Merge positive and negative ones

df = pd.concat([positives, negatives]).reset_index(drop=True)
print(df.decision.value_counts())

df.file_x = "dataset/" + df.file_x
df.file_y = "dataset/" + df.file_y

# --------------------------
# DeepFace

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

instances = df[["file_x", "file_y"]].values.tolist()

models = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]
metrics = ["cosine", "euclidean_l2"]

if True:  # set to False to reuse face-recognition-pivot.csv from an earlier run
    pretrained_models = {}

    pretrained_models["VGG-Face"] = VGGFace.loadModel()
    print("VGG-Face loaded")
    pretrained_models["Facenet"] = Facenet.loadModel()
    print("Facenet loaded")
    pretrained_models["OpenFace"] = OpenFace.loadModel()
    print("OpenFace loaded")
    pretrained_models["DeepFace"] = FbDeepFace.loadModel()
    print("FbDeepFace loaded")

    for model in models:
        for metric in metrics:
            resp_obj = DeepFace.verify(
                instances,
                model_name=model,
                model=pretrained_models[model],
                distance_metric=metric,
                enforce_detection=False,
            )

            distances = []
            for i in range(0, len(instances)):
                distance = round(resp_obj["pair_%s" % (i + 1)]["distance"], 4)
                distances.append(distance)

            df["%s_%s" % (model, metric)] = distances

    df.to_csv("face-recognition-pivot.csv", index=False)
else:
    df = pd.read_csv("face-recognition-pivot.csv")

df_raw = df.copy()

# --------------------------
# Distribution

fig = plt.figure(figsize=(15, 15))

figure_idx = 1
for model in models:
    for metric in metrics:
        feature = "%s_%s" % (model, metric)

        ax1 = fig.add_subplot(len(models) * len(metrics), len(metrics), figure_idx)

        df[df.decision == "Yes"][feature].plot(kind="kde", title=feature, label="Yes", legend=True)
        df[df.decision == "No"][feature].plot(kind="kde", title=feature, label="No", legend=True)

        figure_idx = figure_idx + 1

plt.show()

# --------------------------
# Pre-processing for modelling

columns = []
for model in models:
    for metric in metrics:
        feature = "%s_%s" % (model, metric)
        columns.append(feature)
columns.append("decision")

df = df[columns]

df.loc[df[df.decision == "Yes"].index, "decision"] = 1
df.loc[df[df.decision == "No"].index, "decision"] = 0

print(df.head())

# --------------------------
# Train test split

from sklearn.model_selection import train_test_split

df_train, df_test = train_test_split(df, test_size=0.30, random_state=17)

target_name = "decision"

y_train = df_train[target_name].values
x_train = df_train.drop(columns=[target_name]).values

y_test = df_test[target_name].values
x_test = df_test.drop(columns=[target_name]).values

# --------------------------
# LightGBM

import lightgbm as lgb

features = df.drop(columns=[target_name]).columns.tolist()

lgb_train = lgb.Dataset(x_train, y_train, feature_name=features)
lgb_test = lgb.Dataset(x_test, y_test, feature_name=features)

params = {
    "task": "train",
    "boosting_type": "gbdt",
    "objective": "multiclass",
    "num_class": 2,
    "metric": "multi_logloss",
}

gbm = lgb.train(params, lgb_train, num_boost_round=500, early_stopping_rounds=50, valid_sets=lgb_test)

gbm.save_model("face-recognition-ensemble-model.txt")

# --------------------------
# Evaluation

predictions = gbm.predict(x_test)

prediction_classes = []
for prediction in predictions:
    prediction_class = np.argmax(prediction)
    prediction_classes.append(prediction_class)

y_test = list(y_test)

cm = confusion_matrix(y_test, prediction_classes)
print(cm)

tn, fp, fn, tp = cm.ravel()

recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn) / (tn + fp + fn + tp)
f1 = 2 * (precision * recall) / (precision + recall)

print("Precision: ", 100 * precision, "%")
print("Recall: ", 100 * recall, "%")
print("F1 score ", 100 * f1, "%")
print("Accuracy: ", 100 * accuracy, "%")

# --------------------------
# Interpretability

ax = lgb.plot_importance(gbm, max_num_features=20)
plt.show()

"""
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
plt.rcParams["figure.figsize"] = [20, 20]

for i in range(0, gbm.num_trees()):
    ax = lgb.plot_tree(gbm, tree_index=i)
    plt.show()
    if i == 2:
        break
"""

# --------------------------
# ROC Curve

y_pred_proba = predictions[::, 1]

fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
auc_score = roc_auc_score(y_test, y_pred_proba)  # area under the ROC curve

plt.figure(figsize=(7, 3))
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc_score))

# --------------------------
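
The positive-pair loop in the deleted script enumerates every unordered pair within an identity, i.e. C(n, 2) pairs per person; the same logic on tiny illustrative data:

# same pair-generation logic as above, on tiny illustrative data
identities = {"A": ["a1.jpg", "a2.jpg", "a3.jpg"], "B": ["b1.jpg", "b2.jpg"]}

positives = []
for key, values in identities.items():
    for i in range(0, len(values) - 1):
        for j in range(i + 1, len(values)):
            positives.append([values[i], values[j]])

print(len(positives))  # 4 -> C(3,2) + C(2,2) = 3 + 1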

View File

@@ -1,89 +1,86 @@
# !pip install deepface
import matplotlib.pyplot as plt
import numpy as np
from deepface.basemodels import VGGFace
from deepface.commons import functions

# ----------------------------------------------
# build face recognition model

model = VGGFace.loadModel()

try:
    input_shape = model.layers[0].input_shape[1:3]
except:  # issue 470
    input_shape = model.layers[0].input_shape[0][1:3]

print("model input shape: ", model.layers[0].input_shape[1:])
print("model output shape: ", model.layers[-1].input_shape[-1])

# ----------------------------------------------
# load images and find embeddings

# img1 = functions.detectFace("dataset/img1.jpg", input_shape)
img1 = functions.preprocess_face("dataset/img1.jpg", input_shape)
img1_representation = model.predict(img1)[0, :]

# img2 = functions.detectFace("dataset/img3.jpg", input_shape)
img2 = functions.preprocess_face("dataset/img3.jpg", input_shape)
img2_representation = model.predict(img2)[0, :]

# ----------------------------------------------
# distance between two images

distance_vector = np.square(img1_representation - img2_representation)
# print(distance_vector)

distance = np.sqrt(distance_vector.sum())
print("Euclidean distance: ", distance)

# ----------------------------------------------
# expand vectors to be shown better in graph

img1_graph = []
img2_graph = []
distance_graph = []

for i in range(0, 200):
    img1_graph.append(img1_representation)
    img2_graph.append(img2_representation)
    distance_graph.append(distance_vector)

img1_graph = np.array(img1_graph)
img2_graph = np.array(img2_graph)
distance_graph = np.array(distance_graph)

# ----------------------------------------------
# plotting

fig = plt.figure()

ax1 = fig.add_subplot(3, 2, 1)
plt.imshow(img1[0][:, :, ::-1])
plt.axis("off")

ax2 = fig.add_subplot(3, 2, 2)
im = plt.imshow(img1_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()

ax3 = fig.add_subplot(3, 2, 3)
plt.imshow(img2[0][:, :, ::-1])
plt.axis("off")

ax4 = fig.add_subplot(3, 2, 4)
im = plt.imshow(img2_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()

ax5 = fig.add_subplot(3, 2, 5)
plt.text(0.35, 0, f"Distance: {distance}")
plt.axis("off")

ax6 = fig.add_subplot(3, 2, 6)
im = plt.imshow(distance_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()

plt.show()

# ----------------------------------------------
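
The distance computed above is just the Euclidean norm of the embedding difference, so np.linalg.norm(a - b) would give the same number; a quick equivalence check on arbitrary stand-in vectors:

import numpy as np

a = np.array([0.1, 0.4, -0.2])  # arbitrary stand-ins for embeddings
b = np.array([0.3, 0.0, 0.1])

manual = np.sqrt(np.square(a - b).sum())
builtin = np.linalg.norm(a - b)
print(np.isclose(manual, builtin))  # True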

View File

@@ -9,320 +9,344 @@ from deepface import DeepFace
print("-----------------------------------------")
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf_major_version = int(tf.__version__.split(".")[0])
tf_major_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_major_version == 2:
import logging
tf.get_logger().setLevel(logging.ERROR)
import logging
tf.get_logger().setLevel(logging.ERROR)
print("Running unit tests for TF ", tf.__version__)
print("-----------------------------------------")
expected_coverage = 97
num_cases = 0; succeed_cases = 0
num_cases = 0
succeed_cases = 0
def evaluate(condition):
global num_cases, succeed_cases
global num_cases, succeed_cases
if condition is True:
succeed_cases += 1
if condition == True:
succeed_cases += 1
num_cases += 1
num_cases += 1
# ------------------------------------------------
detectors = ['opencv', 'mtcnn']
models = ['VGG-Face', 'Facenet', 'ArcFace']
metrics = ['cosine', 'euclidean', 'euclidean_l2']
detectors = ["opencv", "mtcnn"]
models = ["VGG-Face", "Facenet", "ArcFace"]
metrics = ["cosine", "euclidean", "euclidean_l2"]
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True],
['dataset/img6.jpg', 'dataset/img7.jpg', True],
['dataset/img8.jpg', 'dataset/img9.jpg', True],
['dataset/img1.jpg', 'dataset/img11.jpg', True],
['dataset/img2.jpg', 'dataset/img11.jpg', True],
['dataset/img1.jpg', 'dataset/img3.jpg', False],
['dataset/img2.jpg', 'dataset/img3.jpg', False],
['dataset/img6.jpg', 'dataset/img8.jpg', False],
['dataset/img6.jpg', 'dataset/img9.jpg', False],
["dataset/img1.jpg", "dataset/img2.jpg", True],
["dataset/img5.jpg", "dataset/img6.jpg", True],
["dataset/img6.jpg", "dataset/img7.jpg", True],
["dataset/img8.jpg", "dataset/img9.jpg", True],
["dataset/img1.jpg", "dataset/img11.jpg", True],
["dataset/img2.jpg", "dataset/img11.jpg", True],
["dataset/img1.jpg", "dataset/img3.jpg", False],
["dataset/img2.jpg", "dataset/img3.jpg", False],
["dataset/img6.jpg", "dataset/img8.jpg", False],
["dataset/img6.jpg", "dataset/img9.jpg", False],
]
print("-----------------------------------------")
def test_cases():

    print("Enforce detection test")
    black_img = np.zeros([224, 224, 3])

    # enforce detection on for represent
    try:
        DeepFace.represent(img_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------
    # enforce detection off for represent
    try:
        objs = DeepFace.represent(img_path=black_img, enforce_detection=False)
        exception_thrown = False

        # validate response of represent function
        assert isinstance(objs, list)
        assert len(objs) > 0
        assert isinstance(objs[0], dict)
        assert "embedding" in objs[0].keys()
        assert "facial_area" in objs[0].keys()
        assert isinstance(objs[0]["facial_area"], dict)
        assert "x" in objs[0]["facial_area"].keys()
        assert "y" in objs[0]["facial_area"].keys()
        assert "w" in objs[0]["facial_area"].keys()
        assert "h" in objs[0]["facial_area"].keys()
        assert isinstance(objs[0]["embedding"], list)
        assert len(objs[0]["embedding"]) == 2622  # embedding of VGG-Face
    except:
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------
    # enforce detection on for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is True

    # -------------------------------------------
    # enforce detection off for verify
    try:
        obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
        assert isinstance(obj, dict)
        exception_thrown = False
    except:
        exception_thrown = True

    assert exception_thrown is False

    # -------------------------------------------

    print("-----------------------------------------")

    print("Extract faces test")

    for detector in detectors:
        img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
        for img_obj in img_objs:
            assert "face" in img_obj.keys()
            assert "facial_area" in img_obj.keys()
            assert isinstance(img_obj["facial_area"], dict)
            assert "x" in img_obj["facial_area"].keys()
            assert "y" in img_obj["facial_area"].keys()
            assert "w" in img_obj["facial_area"].keys()
            assert "h" in img_obj["facial_area"].keys()
            assert "confidence" in img_obj.keys()

            img = img_obj["face"]
            evaluate(img.shape[0] > 0 and img.shape[1] > 0)
        print(detector, " test is done")

    print("-----------------------------------------")

    img_path = "dataset/img1.jpg"
    embedding_objs = DeepFace.represent(img_path)
    for embedding_obj in embedding_objs:
        embedding = embedding_obj["embedding"]
        print("Function returned ", len(embedding), "dimensional vector")
        evaluate(len(embedding) == 2622)

    print("-----------------------------------------")

    print("Different face detectors on verification test")

    for detector in detectors:
        print(detector + " detector")
        res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend=detector)

        assert isinstance(res, dict)
        assert "verified" in res.keys()
        assert res["verified"] in [True, False]
        assert "distance" in res.keys()
        assert "threshold" in res.keys()
        assert "model" in res.keys()
        assert "detector_backend" in res.keys()
        assert "similarity_metric" in res.keys()
        assert "facial_areas" in res.keys()
        assert "img1" in res["facial_areas"].keys()
        assert "img2" in res["facial_areas"].keys()
        assert "x" in res["facial_areas"]["img1"].keys()
        assert "y" in res["facial_areas"]["img1"].keys()
        assert "w" in res["facial_areas"]["img1"].keys()
        assert "h" in res["facial_areas"]["img1"].keys()
        assert "x" in res["facial_areas"]["img2"].keys()
        assert "y" in res["facial_areas"]["img2"].keys()
        assert "w" in res["facial_areas"]["img2"].keys()
        assert "h" in res["facial_areas"]["img2"].keys()

        print(res)
        evaluate(res["verified"] == dataset[0][2])

    print("-----------------------------------------")

    print("Find function test")

    dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
    for df in dfs:
        assert isinstance(df, pd.DataFrame)
        print(df.head())
        evaluate(df.shape[0] > 0)

    print("-----------------------------------------")

    print("Facial analysis test. Passing nothing as an action")

    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img)
    for demography in demography_objs:
        print(demography)
        evaluate(demography["age"] > 20 and demography["age"] < 40)
        evaluate(demography["dominant_gender"] == "Woman")

    print("-----------------------------------------")

    print("Facial analysis test. Passing all to the action")
    demography_objs = DeepFace.analyze(img, ["age", "gender", "race", "emotion"])

    for demography in demography_objs:
        # print(f"Demography: {demography}")
        # check response is a valid json
        print("Age: ", demography["age"])
        print("Gender: ", demography["dominant_gender"])
        print("Race: ", demography["dominant_race"])
        print("Emotion: ", demography["dominant_emotion"])

        evaluate(demography.get("age") is not None)
        evaluate(demography.get("dominant_gender") is not None)
        evaluate(demography.get("dominant_race") is not None)
        evaluate(demography.get("dominant_emotion") is not None)

    print("-----------------------------------------")

    print("Facial analysis test 2. Remove some actions and check they are not computed")
    demography_objs = DeepFace.analyze(img, ["age", "gender"])

    for demography in demography_objs:
        print("Age: ", demography.get("age"))
        print("Gender: ", demography.get("dominant_gender"))
        print("Race: ", demography.get("dominant_race"))
        print("Emotion: ", demography.get("dominant_emotion"))

        evaluate(demography.get("age") is not None)
        evaluate(demography.get("dominant_gender") is not None)
        evaluate(demography.get("dominant_race") is None)
        evaluate(demography.get("dominant_emotion") is None)

    print("-----------------------------------------")

    print("Facial recognition tests")

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                passed = prediction == result

                evaluate(passed)

                if passed:
                    test_result_label = "passed"
                else:
                    test_result_label = "failed"

                if prediction == True:
                    classified_label = "verified"
                else:
                    classified_label = "unverified"

                print(
                    img1.split("/", maxsplit=1)[-1],
                    "-",
                    img2.split("/", maxsplit=1)[-1],
                    classified_label,
                    "as same person based on",
                    model,
                    "and",
                    metric,
                    ". Distance:",
                    distance,
                    ", Threshold:",
                    threshold,
                    "(",
                    test_result_label,
                    ")",
                )

                print("--------------------------")

    # -----------------------------------------

    print("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
    resp_objs = DeepFace.analyze(img)

    for resp_obj in resp_objs:
        print(resp_obj)
        evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
        evaluate(resp_obj["dominant_gender"] == "Woman")  # "gender" holds per-class scores here

    print("--------------------------")

    print("Passing numpy array to verify function")

    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")

    res = DeepFace.verify(img1, img2)
    print(res)
    evaluate(res["verified"] == True)

    print("--------------------------")

    print("Passing numpy array to find function")

    img1 = cv2.imread("dataset/img1.jpg")

    dfs = DeepFace.find(img1, db_path="dataset")

    for df in dfs:
        print(df.head())
        evaluate(df.shape[0] > 0)

    print("--------------------------")

    print("non-binary gender tests")

    # interface validation - no need to call evaluate here

    for img1_path, _, _ in dataset:
        for detector in detectors:
            results = DeepFace.analyze(
                img1_path, actions=("gender",), detector_backend=detector, enforce_detection=False
            )

            for result in results:
                print(result)

                assert "gender" in result.keys()
                assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
                    "Man",
                    "Woman",
                ]

                if result["dominant_gender"] == "Man":
                    assert result["gender"]["Man"] > result["gender"]["Woman"]
                else:
                    assert result["gender"]["Man"] < result["gender"]["Woman"]


# ---------------------------------------------
@@ -336,8 +360,8 @@ test_score = (100 * succeed_cases) / num_cases
print("test coverage: " + str(test_score))
if test_score > expected_coverage:
print("well done! min required test coverage is satisfied")
print("well done! min required test coverage is satisfied")
else:
print("min required test coverage is NOT satisfied")
print("min required test coverage is NOT satisfied")
assert test_score > expected_coverage
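
The coverage gate is plain arithmetic over the counters maintained by evaluate(); a sketch with illustrative counts:

# the coverage arithmetic used by the gate above, on illustrative counts
num_cases, succeed_cases = 100, 98
expected_coverage = 97

test_score = (100 * succeed_cases) / num_cases  # 98.0
assert test_score > expected_coverage  # passes: 98.0 > 97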