commit 7a978d29c5
parent 24cc717620
Author: Sefik Ilkin Serengil
Date: 2023-01-29 00:45:25 +00:00

33 changed files with 5587 additions and 3369 deletions

.github/workflows/lint.yml (vendored, new file, +45 lines)

@@ -0,0 +1,45 @@
name: Tests
on:
push:
paths:
- '.github/workflows/lint.yml'
- 'deepface/**'
- 'tests/**'
- 'api/**'
- 'requirements.txt'
- '.gitignore'
- 'setup.py'
pull_request:
paths:
- '.github/workflows/lint.yml'
- 'deepface/**'
- 'tests/**'
- 'api/**'
- 'requirements.txt'
- '.gitignore'
- 'setup.py'
jobs:
linting-tests-ubuntu-latest:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pylint
- name: Lint tests with pylint
run: |
pylint --fail-under=10 deepface/
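
To reproduce this gate locally before pushing, pylint exposes a programmatic runner equivalent to the step above; a minimal sketch, assuming pylint is installed and the working directory is the repository root:

# local equivalent of the workflow's lint step (a sketch, not part of the commit)
from pylint.lint import Run

# --fail-under=10 exits non-zero unless the score is a perfect 10/10,
# mirroring "pylint --fail-under=10 deepface/" above
Run(["deepface/", "--fail-under=10"])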

.gitignore (vendored, 1 line changed)

@@ -7,7 +7,6 @@ dist/
Pipfile
Pipfile.lock
.mypy_cache/
.vscode/
.idea/
deepface.egg-info/
deepface/__pycache__/*

.pylintrc (new file, +635 lines)

@@ -0,0 +1,635 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\' represents the directory delimiter on Windows systems, it
# can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.9
# Discover python modules and packages in the file system subtree.
recursive=no
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=BaseException,
Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
import-error,
invalid-name,
missing-module-docstring,
missing-function-docstring,
missing-class-docstring,
too-many-arguments,
too-many-locals,
too-many-branches,
too-many-statements,
global-variable-undefined,
import-outside-toplevel,
singleton-comparison,
too-many-lines,
duplicate-code,
bare-except,
cyclic-import,
global-statement
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the 'python-enchant' package.
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
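
The disable list under [MESSAGES CONTROL] silences checks globally; checks left enabled can still be suppressed per line with an inline pragma. A small hypothetical illustration (not part of this commit):

# hypothetical snippet: a per-line suppression for a still-enabled check
def detect(img, backend):  # pylint: disable=unused-argument
    return img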

.vscode/settings.json (vendored, new file, +17 lines)

@@ -0,0 +1,17 @@
{
"python.linting.pylintEnabled": true,
"python.linting.enabled": true,
"python.linting.pylintUseMinimalCheckers": false,
"editor.formatOnSave": true,
"editor.renderWhitespace": "all",
"files.autoSave": "afterDelay",
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"python.formatting.blackArgs": [
"--line-length=100"
],
"editor.fontWeight": "normal",
"python.analysis.extraPaths": [
"./deepface"
]
}

deepface/DeepFace.py

@@ -1,27 +1,43 @@
import warnings
warnings.filterwarnings("ignore")
# common dependencies
import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
from os import path
import cv2
import warnings
import time
import pickle
import logging
# 3rd party dependencies
import numpy as np
import pandas as pd
from tqdm import tqdm
import pickle
import cv2
import tensorflow as tf
from deepface.basemodels import VGGFace, OpenFace, Facenet, Facenet512, FbDeepFace, DeepID, DlibWrapper, ArcFace, SFace
# package dependencies
from deepface.basemodels import (
VGGFace,
OpenFace,
Facenet,
Facenet512,
FbDeepFace,
DeepID,
DlibWrapper,
ArcFace,
SFace,
)
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, realtime, distance as dst
import tensorflow as tf
tf_version = int(tf.__version__.split(".")[0])
# -----------------------------------
# configurations for dependencies
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 2:
import logging
tf.get_logger().setLevel(logging.ERROR)
# -----------------------------------
def build_model(model_name):
@@ -36,60 +52,75 @@ def build_model(model_name):
built deepface model
"""
global model_obj #singleton design pattern
# singleton design pattern
global model_obj
models = {
'VGG-Face': VGGFace.loadModel,
'OpenFace': OpenFace.loadModel,
'Facenet': Facenet.loadModel,
'Facenet512': Facenet512.loadModel,
'DeepFace': FbDeepFace.loadModel,
'DeepID': DeepID.loadModel,
'Dlib': DlibWrapper.loadModel,
'ArcFace': ArcFace.loadModel,
'SFace': SFace.load_model,
'Emotion': Emotion.loadModel,
'Age': Age.loadModel,
'Gender': Gender.loadModel,
'Race': Race.loadModel
"VGG-Face": VGGFace.loadModel,
"OpenFace": OpenFace.loadModel,
"Facenet": Facenet.loadModel,
"Facenet512": Facenet512.loadModel,
"DeepFace": FbDeepFace.loadModel,
"DeepID": DeepID.loadModel,
"Dlib": DlibWrapper.loadModel,
"ArcFace": ArcFace.loadModel,
"SFace": SFace.load_model,
"Emotion": Emotion.loadModel,
"Age": Age.loadModel,
"Gender": Gender.loadModel,
"Race": Race.loadModel,
}
if not "model_obj" in globals():
model_obj = {}
if not model_name in model_obj.keys():
if not model_name in model_obj:
model = models.get(model_name)
if model:
model = model()
model_obj[model_name] = model
#print(model_name," built")
else:
raise ValueError('Invalid model_name passed - {}'.format(model_name))
raise ValueError(f"Invalid model_name passed - {model_name}")
return model_obj[model_name]
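
Because build_model caches each constructed network in the module-level model_obj, repeated calls with the same name return the same object; a usage sketch, assuming deepface and its model weights are available:

from deepface import DeepFace

# the first call builds and caches the network; the second hits the cache
model = DeepFace.build_model("VGG-Face")
assert model is DeepFace.build_model("VGG-Face")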
def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'opencv', distance_metric = 'cosine', enforce_detection = True, align = True, normalization = 'base'):
def verify(
img1_path,
img2_path,
model_name="VGG-Face",
detector_backend="opencv",
distance_metric="cosine",
enforce_detection=True,
align=True,
normalization="base",
):
"""
This function verifies an image pair is same person or different persons. In the background, verification function represents
facial images as vectors and then calculates the similarity between those vectors. Vectors of same person images should have
more similarity (or less distance) than vectors of different persons.
This function verifies an image pair is same person or different persons. In the background,
verification function represents facial images as vectors and then calculates the similarity
between those vectors. Vectors of same person images should have more similarity (or less
distance) than vectors of different persons.
Parameters:
img1_path, img2_path: exact image path as string. numpy array (BGR) or based64 encoded images are also welcome.
If one of pair has more than one face, then we will compare the face pair with max similarity.
img1_path, img2_path: exact image path as string. numpy array (BGR) or base64 encoded
images are also welcome. If one of the pair has more than one face, then we will
compare the face pair with max similarity.
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace
model_name (str): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib
, ArcFace and SFace
distance_metric (string): cosine, euclidean, euclidean_l2
enforce_detection (boolean): If no face could not be detected in an image, then this function will return exception by default.
Set this to False not to have this exception. This might be convenient for low resolution images.
enforce_detection (boolean): If no face could be detected in an image, then this
function will raise an exception by default. Set this to False to disable this
exception. This might be convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
Returns:
Verify function returns a dictionary. If img1_path is a list of image pairs, then the function will return list of dictionary.
Verify function returns a dictionary.
{
"verified": True
@@ -108,56 +139,62 @@ def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'op
tic = time.time()
#--------------------------------
# --------------------------------
target_size = functions.find_target_size(model_name=model_name)
# img pairs might have many faces
img1_objs = functions.extract_faces(
img = img1_path,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align)
img=img1_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
img2_objs = functions.extract_faces(
img = img2_path,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align)
#--------------------------------
img=img2_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
# --------------------------------
distances = []
regions = []
# now we will find the face pair with minimum distance
for img1_content, img1_region, img1_confidence in img1_objs:
for img2_content, img2_region, img2_confidence in img2_objs:
img1_embedding_obj = represent(img_path = img1_content
, model_name = model_name
, enforce_detection = enforce_detection
, detector_backend = "skip"
, align = align
, normalization = normalization
for img1_content, img1_region, _ in img1_objs:
for img2_content, img2_region, _ in img2_objs:
img1_embedding_obj = represent(
img_path=img1_content,
model_name=model_name,
enforce_detection=enforce_detection,
detector_backend="skip",
align=align,
normalization=normalization,
)
img2_embedding_obj = represent(img_path = img2_content
, model_name = model_name
, enforce_detection = enforce_detection
, detector_backend = "skip"
, align = align
, normalization = normalization
img2_embedding_obj = represent(
img_path=img2_content,
model_name=model_name,
enforce_detection=enforce_detection,
detector_backend="skip",
align=align,
normalization=normalization,
)
img1_representation = img1_embedding_obj[0]["embedding"]
img2_representation = img2_embedding_obj[0]["embedding"]
if distance_metric == 'cosine':
if distance_metric == "cosine":
distance = dst.findCosineDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean':
elif distance_metric == "euclidean":
distance = dst.findEuclideanDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean_l2':
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
elif distance_metric == "euclidean_l2":
distance = dst.findEuclideanDistance(
dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation)
)
else:
raise ValueError("Invalid distance_metric passed - ", distance_metric)
@@ -166,42 +203,53 @@ def verify(img1_path, img2_path, model_name = 'VGG-Face', detector_backend = 'op
# -------------------------------
threshold = dst.findThreshold(model_name, distance_metric)
distance = min(distances) #best distance
distance = min(distances) # best distance
facial_areas = regions[np.argmin(distances)]
toc = time.time()
resp_obj = {
"verified": True if distance <= threshold else False
, "distance": distance
, "threshold": threshold
, "model": model_name
, "detector_backend": detector_backend
, "similarity_metric": distance_metric
, "facial_areas": {
"img1": facial_areas[0],
"img2": facial_areas[1]
}
, "time": round(toc - tic, 2)
"verified": distance <= threshold,
"distance": distance,
"threshold": threshold,
"model": model_name,
"detector_backend": detector_backend,
"similarity_metric": distance_metric,
"facial_areas": {"img1": facial_areas[0], "img2": facial_areas[1]},
"time": round(toc - tic, 2),
}
return resp_obj
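
A usage sketch of the reformatted verify (file names are placeholders):

from deepface import DeepFace

result = DeepFace.verify(img1_path="img1.jpg", img2_path="img2.jpg",
                         model_name="VGG-Face", distance_metric="cosine")
# keys follow the resp_obj built above
print(result["verified"], result["distance"], result["threshold"])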
def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , enforce_detection = True, detector_backend = 'opencv', align = True, silent = False):
def analyze(
img_path,
actions=("emotion", "age", "gender", "race"),
enforce_detection=True,
detector_backend="opencv",
align=True,
silent=False,
):
"""
This function analyzes facial attributes including age, gender, emotion and race. In the background, analysis function builds convolutional
neural network models to classify age, gender, emotion and race of the input image.
This function analyzes facial attributes including age, gender, emotion and race.
In the background, analysis function builds convolutional neural network models to
classify age, gender, emotion and race of the input image.
Parameters:
img_path: exact image path, numpy array (BGR) or base64 encoded image could be passed.
If source image has more than one face, then result will be size of number of faces appearing in the image.
If the source image has more than one face, the result will have one entry per face
appearing in the image.
actions (tuple): The default is ('age', 'gender', 'emotion', 'race'). You can drop some of those attributes.
actions (tuple): The default is ('age', 'gender', 'emotion', 'race'). You can drop
some of those attributes.
enforce_detection (boolean): The function throws exception if no face detected by default. Set this to False if you don't want to get exception. This might be convenient for low resolution images.
enforce_detection (bool): The function throws an exception if no face is detected by
default. Set this to False if you don't want to get an exception. This might be
convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe.
silent (boolean): disable (some) log messages
@@ -239,66 +287,81 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , enforce_d
}
]
"""
#---------------------------------
# ---------------------------------
# validate actions
if type(actions) == str:
if isinstance(actions, str):
actions = (actions,)
actions = list(actions)
#---------------------------------
# ---------------------------------
# build models
models = {}
if 'emotion' in actions:
models['emotion'] = build_model('Emotion')
if "emotion" in actions:
models["emotion"] = build_model("Emotion")
if 'age' in actions:
models['age'] = build_model('Age')
if "age" in actions:
models["age"] = build_model("Age")
if 'gender' in actions:
models['gender'] = build_model('Gender')
if "gender" in actions:
models["gender"] = build_model("Gender")
if 'race' in actions:
models['race'] = build_model('Race')
#---------------------------------
if "race" in actions:
models["race"] = build_model("Race")
# ---------------------------------
resp_objects = []
img_objs = functions.extract_faces(img=img_path, target_size=(224, 224), detector_backend=detector_backend, grayscale = False, enforce_detection=enforce_detection, align=align)
img_objs = functions.extract_faces(
img=img_path,
target_size=(224, 224),
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
for img_content, img_region, img_confidence in img_objs:
for img_content, img_region, _ in img_objs:
if img_content.shape[0] > 0 and img_content.shape[1] > 0:
obj = {}
#facial attribute analysis
pbar = tqdm(range(0, len(actions)), desc='Finding actions', disable = silent)
# facial attribute analysis
pbar = tqdm(range(0, len(actions)), desc="Finding actions", disable=silent)
for index in pbar:
action = actions[index]
pbar.set_description("Action: %s" % (action))
pbar.set_description(f"Action: {action}")
if action == 'emotion':
if action == "emotion":
img_gray = cv2.cvtColor(img_content[0], cv2.COLOR_BGR2GRAY)
img_gray = cv2.resize(img_gray, (48, 48))
img_gray = np.expand_dims(img_gray, axis = 0)
img_gray = np.expand_dims(img_gray, axis=0)
emotion_predictions = models['emotion'].predict(img_gray, verbose=0)[0,:]
emotion_predictions = models["emotion"].predict(img_gray, verbose=0)[0, :]
sum_of_predictions = emotion_predictions.sum()
obj["emotion"] = {}
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
emotion_labels = [
"angry",
"disgust",
"fear",
"happy",
"sad",
"surprise",
"neutral",
]
for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i]
for i, emotion_label in enumerate(emotion_labels):
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
obj["emotion"][emotion_label] = emotion_prediction
obj["dominant_emotion"] = emotion_labels[np.argmax(emotion_predictions)]
elif action == 'age':
age_predictions = models['age'].predict(img_content, verbose=0)[0,:]
elif action == "age":
age_predictions = models["age"].predict(img_content, verbose=0)[0, :]
apparent_age = Age.findApparentAge(age_predictions)
obj["age"] = int(apparent_age) #int cast is for the exception - object of type 'float32' is not JSON serializable
# int cast is for exception - object of type 'float32' is not JSON serializable
obj["age"] = int(apparent_age)
elif action == 'gender':
gender_predictions = models['gender'].predict(img_content, verbose=0)[0,:]
elif action == "gender":
gender_predictions = models["gender"].predict(img_content, verbose=0)[0, :]
gender_labels = ["Woman", "Man"]
obj["gender"] = {}
for i, gender_label in enumerate(gender_labels):
@@ -307,20 +370,26 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , enforce_d
obj["dominant_gender"] = gender_labels[np.argmax(gender_predictions)]
elif action == 'race':
race_predictions = models['race'].predict(img_content, verbose=0)[0,:]
elif action == "race":
race_predictions = models["race"].predict(img_content, verbose=0)[0, :]
sum_of_predictions = race_predictions.sum()
obj["race"] = {}
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
for i in range(0, len(race_labels)):
race_label = race_labels[i]
race_labels = [
"asian",
"indian",
"black",
"white",
"middle eastern",
"latino hispanic",
]
for i, race_label in enumerate(race_labels):
race_prediction = 100 * race_predictions[i] / sum_of_predictions
obj["race"][race_label] = race_prediction
obj["dominant_race"] = race_labels[np.argmax(race_predictions)]
#-----------------------------
# -----------------------------
# mention facial areas
obj["region"] = img_region
@@ -328,93 +397,129 @@ def analyze(img_path, actions = ('emotion', 'age', 'gender', 'race') , enforce_d
return resp_objects
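
A usage sketch for analyze (the file name is a placeholder); it returns one dictionary per detected face:

from deepface import DeepFace

objs = DeepFace.analyze(img_path="img.jpg", actions=("emotion", "age", "gender", "race"))
for obj in objs:
    # dominant_* keys follow the objects built above
    print(obj["dominant_emotion"], obj["age"], obj["dominant_gender"], obj["dominant_race"])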
def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine', enforce_detection = True, detector_backend = 'opencv', align = True, normalization = 'base', silent=False):
def find(
img_path,
db_path,
model_name="VGG-Face",
distance_metric="cosine",
enforce_detection=True,
detector_backend="opencv",
align=True,
normalization="base",
silent=False,
):
"""
This function applies verification several times and find the identities in a database
Parameters:
img_path: exact image path, numpy array (BGR) or base64 encoded image.
Source image can have many faces. Then, result will be the size of number of faces in the source image.
Source image can have many faces. Then, result will be the size of number of
faces in the source image.
db_path (string): You should store some .jpg files in a folder and pass the exact folder path to this.
A database image can also have many faces. Then, all detected faces in db side will be considered in the decision.
db_path (string): You should store some image files in a folder and pass the
exact folder path to this. A database image can also have many faces.
Then, all detected faces in db side will be considered in the decision.
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID,
Dlib, ArcFace, SFace or Ensemble
distance_metric (string): cosine, euclidean, euclidean_l2
enforce_detection (boolean): The function throws exception if a face could not be detected. Set this to True if you don't want to get exception. This might be convenient for low resolution images.
enforce_detection (bool): The function throws an exception if a face could not be
detected. Set this to False if you don't want to get an exception. This might be
convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
silent (boolean): disable some logging and progress bars
Returns:
This function returns list of pandas data frame. Each item of the list corresponding to an identity in the img_path.
This function returns a list of pandas data frames. Each item of the list corresponds
to an identity in the img_path.
"""
tic = time.time()
#-------------------------------
if os.path.isdir(db_path) != True:
# -------------------------------
if os.path.isdir(db_path) is not True:
raise ValueError("Passed db_path does not exist!")
else:
target_size = functions.find_target_size(model_name=model_name)
#---------------------------------------
# ---------------------------------------
file_name = "representations_%s.pkl" % (model_name)
file_name = f"representations_{model_name}.pkl"
file_name = file_name.replace("-", "_").lower()
if path.exists(db_path+"/"+file_name):
if path.exists(db_path + "/" + file_name):
if not silent:
print("WARNING: Representations for images in ",db_path," folder were previously stored in ", file_name, ". If you added new instances after this file creation, then please delete this file and call find function again. It will create it again.")
print(
f"WARNING: Representations for images in {db_path} folder were previously stored"
+ f" in {file_name}. If you added new instances after the creation, then please "
+ "delete this file and call find function again. It will create it again."
)
f = open(db_path+'/'+file_name, 'rb')
with open(f"{db_path}/{file_name}", "rb") as f:
representations = pickle.load(f)
if not silent:
print("There are ", len(representations)," representations found in ",file_name)
print("There are ", len(representations), " representations found in ", file_name)
else: #create representation.pkl from scratch
else: # create representation.pkl from scratch
employees = []
for r, d, f in os.walk(db_path): # r=root, d=directories, f = files
for r, _, f in os.walk(db_path):
for file in f:
if ('.jpg' in file.lower()) or ('.jpeg' in file.lower()) or ('.png' in file.lower()):
if (
(".jpg" in file.lower())
or (".jpeg" in file.lower())
or (".png" in file.lower())
):
exact_path = r + "/" + file
employees.append(exact_path)
if len(employees) == 0:
raise ValueError("There is no image in ", db_path," folder! Validate .jpg or .png files exist in this path.")
raise ValueError(
"There is no image in ",
db_path,
" folder! Validate .jpg or .png files exist in this path.",
)
#------------------------
#find representations for db images
# ------------------------
# find representations for db images
representations = []
#for employee in employees:
pbar = tqdm(range(0,len(employees)), desc='Finding representations', disable = True if silent == True else False)
# for employee in employees:
pbar = tqdm(
range(0, len(employees)),
desc="Finding representations",
disable=silent,
)
for index in pbar:
employee = employees[index]
img_objs = functions.extract_faces(img = employee,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align
img_objs = functions.extract_faces(
img=employee,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
for img_content, img_region, img_confidence in img_objs:
embedding_obj = represent(img_path = img_content
, model_name = model_name
, enforce_detection = enforce_detection
, detector_backend = "skip"
, align = align
, normalization = normalization
for img_content, _, _ in img_objs:
embedding_obj = represent(
img_path=img_content,
model_name=model_name,
enforce_detection=enforce_detection,
detector_backend="skip",
align=align,
normalization=normalization,
)
img_representation = embedding_obj[0]["embedding"]
@@ -424,42 +529,46 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
instance.append(img_representation)
representations.append(instance)
#-------------------------------
# -------------------------------
f = open(db_path+'/'+file_name, "wb")
with open(f"{db_path}/{file_name}", "wb") as f:
pickle.dump(representations, f)
f.close()
if not silent:
print("Representations stored in ",db_path,"/",file_name," file. Please delete this file when you add new identities in your database.")
print(
f"Representations stored in {db_path}/{file_name} file."
+ "Please delete this file when you add new identities in your database."
)
#----------------------------
#now, we got representations for facial database
df = pd.DataFrame(representations, columns = ["identity", f"{model_name}_representation"])
# ----------------------------
# now, we got representations for facial database
df = pd.DataFrame(representations, columns=["identity", f"{model_name}_representation"])
# img path might have more than one face
target_objs = functions.extract_faces(img = img_path,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align
target_objs = functions.extract_faces(
img=img_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
resp_obj = []
for target_img, target_region, target_confidence in target_objs:
target_embedding_obj = represent(img_path = target_img
, model_name = model_name
, enforce_detection = enforce_detection
, detector_backend = "skip"
, align = align
, normalization = normalization
for target_img, target_region, _ in target_objs:
target_embedding_obj = represent(
img_path=target_img,
model_name=model_name,
enforce_detection=enforce_detection,
detector_backend="skip",
align=align,
normalization=normalization,
)
target_representation = target_embedding_obj[0]["embedding"]
result_df = df.copy() #df will be filtered in each img
result_df = df.copy() # df will be filtered in each img
result_df["source_x"] = target_region["x"]
result_df["source_y"] = target_region["y"]
result_df["source_w"] = target_region["w"]
@@ -469,25 +578,30 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
for index, instance in df.iterrows():
source_representation = instance[f"{model_name}_representation"]
if distance_metric == 'cosine':
if distance_metric == "cosine":
distance = dst.findCosineDistance(source_representation, target_representation)
elif distance_metric == 'euclidean':
elif distance_metric == "euclidean":
distance = dst.findEuclideanDistance(source_representation, target_representation)
elif distance_metric == 'euclidean_l2':
distance = dst.findEuclideanDistance(dst.l2_normalize(source_representation), dst.l2_normalize(target_representation))
elif distance_metric == "euclidean_l2":
distance = dst.findEuclideanDistance(
dst.l2_normalize(source_representation),
dst.l2_normalize(target_representation),
)
else:
raise ValueError(f"invalid distance metric passes - {distance_metric}")
distances.append(distance)
#---------------------------
# ---------------------------
result_df[f"{model_name}_{distance_metric}"] = distances
threshold = dst.findThreshold(model_name, distance_metric)
result_df = result_df.drop(columns = [f"{model_name}_representation"])
result_df = result_df.drop(columns=[f"{model_name}_representation"])
result_df = result_df[result_df[f"{model_name}_{distance_metric}"] <= threshold]
result_df = result_df.sort_values(by = [f"{model_name}_{distance_metric}"], ascending=True).reset_index(drop=True)
result_df = result_df.sort_values(
by=[f"{model_name}_{distance_metric}"], ascending=True
).reset_index(drop=True)
resp_obj.append(result_df)
@@ -496,50 +610,67 @@ def find(img_path, db_path, model_name ='VGG-Face', distance_metric = 'cosine',
toc = time.time()
if not silent:
print("find function lasts ",toc-tic," seconds")
print("find function lasts ", toc - tic, " seconds")
return resp_obj
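
A usage sketch for find (paths are placeholders); the distance column is named {model_name}_{distance_metric}, as built above:

from deepface import DeepFace

# one data frame per face in img_path, filtered by threshold, sorted by distance
dfs = DeepFace.find(img_path="img.jpg", db_path="my_db", model_name="VGG-Face")
print(dfs[0][["identity", "VGG-Face_cosine"]].head())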
def represent(img_path, model_name = 'VGG-Face', enforce_detection = True, detector_backend = 'opencv', align = True, normalization = 'base'):
def represent(
img_path,
model_name="VGG-Face",
enforce_detection=True,
detector_backend="opencv",
align=True,
normalization="base",
):
"""
This function represents facial images as vectors. The function uses convolutional neural networks models to generate vector embeddings.
This function represents facial images as vectors. The function uses convolutional neural
network models to generate vector embeddings.
Parameters:
img_path (string): exact image path. Alternatively, numpy array (BGR) or based64 encoded images could be passed.
Source image can have many faces. Then, result will be the size of number of faces appearing in the source image.
img_path (string): exact image path. Alternatively, numpy array (BGR) or base64
encoded images could be passed. Source image can have many faces. Then, the result
will have one entry per face appearing in the source image.
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
ArcFace, SFace
enforce_detection (boolean): If no face could not be detected in an image, then this function will return exception by default.
Set this to False not to have this exception. This might be convenient for low resolution images.
enforce_detection (boolean): If no face could be detected in an image, then this
function will raise an exception by default. Set this to False to disable this
exception. This might be convenient for low resolution images.
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd, dlib or mediapipe
detector_backend (string): set face detector backend to opencv, retinaface, mtcnn, ssd,
dlib or mediapipe
align (boolean): alignment according to the eye positions.
normalization (string): normalize the input image before feeding to model
Returns:
Represent function returns a multidimensional vector. The number of dimensions is changing based on the reference model. E.g. FaceNet returns 128 dimensional vector; VGG-Face returns 2622 dimensional vector.
Represent function returns a list of object with multidimensional vector (embedding).
The number of dimensions is changing based on the reference model.
E.g. FaceNet returns 128 dimensional vector; VGG-Face returns 2622 dimensional vector.
"""
resp_objs = []
model = build_model(model_name)
#---------------------------------
# we started to run pre-process in verification. so, this can be skipped if it is coming from verification.
# ---------------------------------
# pre-processing is already done in verification, so it can be skipped when coming from verify
if detector_backend != "skip":
target_size = functions.find_target_size(model_name=model_name)
img_objs = functions.extract_faces(img = img_path,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align)
img_objs = functions.extract_faces(
img=img_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
)
else: # skip
if type(img_path) == str:
if isinstance(img_path, str):
img = functions.load_image(img_path)
elif type(img_path).__module__ == np.__name__:
img = img_path.copy()
@@ -548,18 +679,18 @@ def represent(img_path, model_name = 'VGG-Face', enforce_detection = True, detec
img_region = [0, 0, img.shape[1], img.shape[0]]
img_objs = [(img, img_region, 0)]
#---------------------------------
# ---------------------------------
for img, region, confidence in img_objs:
#custom normalization
img = functions.normalize_input(img = img, normalization = normalization)
for img, region, _ in img_objs:
# custom normalization
img = functions.normalize_input(img=img, normalization=normalization)
#represent
# represent
if "keras" in str(type(model)):
#new tf versions show progress bar and it is annoying
# new tf versions show progress bar and it is annoying
embedding = model.predict(img, verbose=0)[0].tolist()
else:
#SFace and Dlib are not keras models and no verbose arguments
# SFace and Dlib are not keras models and no verbose arguments
embedding = model.predict(img)[0].tolist()
resp_obj = {}
@@ -569,7 +700,17 @@ def represent(img_path, model_name = 'VGG-Face', enforce_detection = True, detec
return resp_objs
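
A usage sketch for represent (the file name is a placeholder):

from deepface import DeepFace

embedding_objs = DeepFace.represent(img_path="img.jpg", model_name="Facenet")
embedding = embedding_objs[0]["embedding"]  # 128 floats for Facenet
print(len(embedding))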
def stream(db_path = '', model_name ='VGG-Face', detector_backend = 'opencv', distance_metric = 'cosine', enable_face_analysis = True, source = 0, time_threshold = 5, frame_threshold = 5):
def stream(
db_path="",
model_name="VGG-Face",
detector_backend="opencv",
distance_metric="cosine",
enable_face_analysis=True,
source=0,
time_threshold=5,
frame_threshold=5,
):
"""
This function applies real time face recognition and facial attribute analysis
@@ -577,7 +718,8 @@ def stream(db_path = '', model_name ='VGG-Face', detector_backend = 'opencv', di
Parameters:
db_path (string): facial database path. You should store some .jpg files in this folder.
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace or Ensemble
model_name (string): VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
ArcFace, SFace
detector_backend (string): opencv, retinaface, mtcnn, ssd, dlib or mediapipe
@@ -594,45 +736,72 @@ def stream(db_path = '', model_name ='VGG-Face', detector_backend = 'opencv', di
"""
if time_threshold < 1:
raise ValueError("time_threshold must be greater than the value 1 but you passed "+str(time_threshold))
raise ValueError(
"time_threshold must be greater than the value 1 but you passed " + str(time_threshold)
)
if frame_threshold < 1:
raise ValueError("frame_threshold must be greater than the value 1 but you passed "+str(frame_threshold))
raise ValueError(
"frame_threshold must be greater than the value 1 but you passed "
+ str(frame_threshold)
)
realtime.analysis(db_path, model_name, detector_backend, distance_metric, enable_face_analysis
, source = source, time_threshold = time_threshold, frame_threshold = frame_threshold)
realtime.analysis(
db_path,
model_name,
detector_backend,
distance_metric,
enable_face_analysis,
source=source,
time_threshold=time_threshold,
frame_threshold=frame_threshold,
)
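
A usage sketch for stream; db_path is a placeholder folder of .jpg files:

from deepface import DeepFace

# opens the default webcam (source=0) and matches frames against "my_db"
DeepFace.stream(db_path="my_db", model_name="VGG-Face",
                time_threshold=5, frame_threshold=5)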
def extract_faces(img_path, target_size = (224, 224), detector_backend = 'opencv', enforce_detection = True, align = True):
def extract_faces(
img_path,
target_size=(224, 224),
detector_backend="opencv",
enforce_detection=True,
align=True,
grayscale=False,
):
"""
This function applies pre-processing stages of a face recognition pipeline including detection and alignment
This function applies pre-processing stages of a face recognition pipeline
including detection and alignment
Parameters:
img_path: exact image path, numpy array (BGR) or base64 encoded image.
Source image can have many face. Then, result will be the size of number of faces appearing in that source image.
Source image can have many faces. Then, the result will have one entry per
face appearing in that source image.
target_size (tuple): final shape of facial image. black pixels will be added to resize the image.
target_size (tuple): final shape of facial image. black pixels will be
added to resize the image.
detector_backend (string): face detection backends are retinaface, mtcnn, opencv, ssd or dlib
detector_backend (string): face detection backends are retinaface, mtcnn,
opencv, ssd or dlib
enforce_detection (boolean): function throws exception if face cannot be detected in the fed image.
Set this to False if you do not want to get exception and run the function anyway.
enforce_detection (boolean): function throws an exception if a face cannot be
detected in the given image. Set this to False if you do not want to get an
exception and want to run the function anyway.
align (boolean): alignment according to the eye positions.
Returns:
list of dictionaries. Each dictionary will have facial image itself, extracted area from the original image and confidence score.
list of dictionaries. Each dictionary will have facial image itself,
extracted area from the original image and confidence score.
"""
resp_objs = []
img_objs = functions.extract_faces(
img = img_path,
target_size = target_size,
detector_backend = detector_backend,
grayscale = False,
enforce_detection = enforce_detection,
align = align
img=img_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=grayscale,
enforce_detection=enforce_detection,
align=align,
)
for img, region, confidence in img_objs:
@@ -649,11 +818,17 @@ def extract_faces(img_path, target_size = (224, 224), detector_backend = 'opencv
return resp_objs
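
A usage sketch for the new extract_faces wrapper (the file name is a placeholder):

from deepface import DeepFace

face_objs = DeepFace.extract_faces(img_path="img.jpg", detector_backend="opencv")
print(len(face_objs))  # one entry per detected face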
#---------------------------
#main
# ---------------------------
# main
functions.initialize_folder()
def cli():
"""
command line interface function will be offered in this block
"""
import fire
fire.Fire()
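
Since fire.Fire() exposes the calling scope's functions as subcommands, a console script wired to cli() would accept invocations such as "deepface verify --img1_path img1.jpg --img2_path img2.jpg", assuming setup.py registers that entry point. A programmatic sketch of the same dispatch:

# a sketch of the fire dispatch; the real entry point exposes the whole module
import fire
from deepface import DeepFace

if __name__ == "__main__":
    fire.Fire({"verify": DeepFace.verify, "analyze": DeepFace.analyze})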

deepface/basemodels/ArcFace.py

@@ -1,93 +1,156 @@
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
import tensorflow
from tensorflow import keras
import os
from pathlib import Path
import gdown
import tensorflow as tf
from deepface.commons import functions
#url = "https://drive.google.com/uc?id=1LVB3CdVejpmGHM28BpqqkbZP5hDEcdZY"
# --------------------------------
# dependency configuration
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5'):
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.engine import training
import keras
from keras.layers import (
ZeroPadding2D,
Input,
Conv2D,
BatchNormalization,
PReLU,
Add,
Dropout,
Flatten,
Dense,
)
else:
from tensorflow.python.keras.engine import training
from tensorflow import keras
from tensorflow.keras.layers import (
ZeroPadding2D,
Input,
Conv2D,
BatchNormalization,
PReLU,
Add,
Dropout,
Flatten,
Dense,
)
# --------------------------------
# url = "https://drive.google.com/uc?id=1LVB3CdVejpmGHM28BpqqkbZP5hDEcdZY"
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
):
base_model = ResNet34()
inputs = base_model.inputs[0]
arcface_model = base_model.outputs[0]
arcface_model = keras.layers.BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
arcface_model = keras.layers.Dropout(0.4)(arcface_model)
arcface_model = keras.layers.Flatten()(arcface_model)
arcface_model = keras.layers.Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(arcface_model)
embedding = keras.layers.BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(arcface_model)
arcface_model = BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
arcface_model = Dropout(0.4)(arcface_model)
arcface_model = Flatten()(arcface_model)
arcface_model = Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(
arcface_model
)
embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
arcface_model
)
model = keras.models.Model(inputs, embedding, name=base_model.name)
#---------------------------------------
#check the availability of pre-trained weights
# ---------------------------------------
# check the availability of pre-trained weights
home = functions.get_deepface_home()
file_name = "arcface_weights.h5"
output = home+'/.deepface/weights/'+file_name
output = home + "/.deepface/weights/" + file_name
if os.path.isfile(output) != True:
print(file_name," will be downloaded to ",output)
print(file_name, " will be downloaded to ", output)
gdown.download(url, output, quiet=False)
#---------------------------------------
# ---------------------------------------
model.load_weights(output)
return model
def ResNet34():
img_input = tensorflow.keras.layers.Input(shape=(112, 112, 3))
img_input = Input(shape=(112, 112, 3))
x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name='conv1_pad')(img_input)
x = tensorflow.keras.layers.Conv2D(64, 3, strides=1, use_bias=False, kernel_initializer='glorot_normal', name='conv1_conv')(x)
x = tensorflow.keras.layers.BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name='conv1_bn')(x)
x = tensorflow.keras.layers.PReLU(shared_axes=[1, 2], name='conv1_prelu')(x)
x = ZeroPadding2D(padding=1, name="conv1_pad")(img_input)
x = Conv2D(
64, 3, strides=1, use_bias=False, kernel_initializer="glorot_normal", name="conv1_conv"
)(x)
x = BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name="conv1_bn")(x)
x = PReLU(shared_axes=[1, 2], name="conv1_prelu")(x)
x = stack_fn(x)
model = training.Model(img_input, x, name='ResNet34')
model = training.Model(img_input, x, name="ResNet34")
return model
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
bn_axis = 3
if conv_shortcut:
shortcut = tensorflow.keras.layers.Conv2D(filters, 1, strides=stride, use_bias=False, kernel_initializer='glorot_normal', name=name + '_0_conv')(x)
shortcut = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_0_bn')(shortcut)
shortcut = Conv2D(
filters,
1,
strides=stride,
use_bias=False,
kernel_initializer="glorot_normal",
name=name + "_0_conv",
)(x)
shortcut = BatchNormalization(
axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_0_bn"
)(shortcut)
else:
shortcut = x
x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_1_bn')(x)
x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name=name + '_1_pad')(x)
x = tensorflow.keras.layers.Conv2D(filters, 3, strides=1, kernel_initializer='glorot_normal', use_bias=False, name=name + '_1_conv')(x)
x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_2_bn')(x)
x = tensorflow.keras.layers.PReLU(shared_axes=[1, 2], name=name + '_1_prelu')(x)
x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_1_bn")(x)
x = ZeroPadding2D(padding=1, name=name + "_1_pad")(x)
x = Conv2D(
filters,
3,
strides=1,
kernel_initializer="glorot_normal",
use_bias=False,
name=name + "_1_conv",
)(x)
x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_2_bn")(x)
x = PReLU(shared_axes=[1, 2], name=name + "_1_prelu")(x)
x = tensorflow.keras.layers.ZeroPadding2D(padding=1, name=name + '_2_pad')(x)
x = tensorflow.keras.layers.Conv2D(filters, kernel_size, strides=stride, kernel_initializer='glorot_normal', use_bias=False, name=name + '_2_conv')(x)
x = tensorflow.keras.layers.BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + '_3_bn')(x)
x = ZeroPadding2D(padding=1, name=name + "_2_pad")(x)
x = Conv2D(
filters,
kernel_size,
strides=stride,
kernel_initializer="glorot_normal",
use_bias=False,
name=name + "_2_conv",
)(x)
x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_3_bn")(x)
x = tensorflow.keras.layers.Add(name=name + '_add')([shortcut, x])
x = Add(name=name + "_add")([shortcut, x])
return x
def stack1(x, filters, blocks, stride1=2, name=None):
x = block1(x, filters, stride=stride1, name=name + '_block1')
x = block1(x, filters, stride=stride1, name=name + "_block1")
for i in range(2, blocks + 1):
x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
x = block1(x, filters, conv_shortcut=False, name=name + "_block" + str(i))
return x
def stack_fn(x):
x = stack1(x, 64, 3, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 6, name='conv4')
return stack1(x, 512, 3, name='conv5')
x = stack1(x, 64, 3, name="conv2")
x = stack1(x, 128, 4, name="conv3")
x = stack1(x, 256, 6, name="conv4")
return stack1(x, 512, 3, name="conv5")
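
A short sanity-check sketch for the refactored ArcFace loader; the random input is only for shape checking, while the 112x112x3 input and 512-d output follow from ResNet34 and the Dense(512) embedding head above:

import numpy as np
from deepface.basemodels import ArcFace

model = ArcFace.loadModel()  # downloads arcface_weights.h5 on first call
dummy = np.random.rand(1, 112, 112, 3).astype("float32")  # illustrative input
embedding = model.predict(dummy)
print(embedding.shape)  # (1, 512), from the Dense(512) embedding head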


@ -1,59 +0,0 @@
from deepface import DeepFace
from tqdm import tqdm
import os
from os import path
from pathlib import Path
import numpy as np
import gdown
from deepface.commons import functions, distance as dst
def loadModel():
model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
model = {}
model_pbar = tqdm(range(0, 4), desc='Face recognition models')
for index in model_pbar:
model_name = model_names[index]
model_pbar.set_description("Loading %s" % (model_name))
model[model_name] = DeepFace.build_model(model_name)
return model
def validate_model(model):
#validate model dictionary because it might be passed from input as pre-trained
found_models = []
for key, value in model.items():
found_models.append(key)
if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models):
#print("Ensemble learning will be applied for ", found_models," models")
valid = True
else:
missing_ones = set(['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']) - set(found_models)
raise ValueError("You'd like to apply ensemble method and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+str(found_models)+". So, you need to pass "+str(missing_ones)+" models as well.")
def build_gbm():
#this is not a must dependency
import lightgbm as lgb #lightgbm==2.3.1
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True:
print("face-recognition-ensemble-model.txt will be downloaded...")
url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
output = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
gdown.download(url, output, quiet=False)
ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
deepface_ensemble = lgb.Booster(model_file = ensemble_model_path)
return deepface_ensemble


@ -1,56 +1,81 @@
import os
from pathlib import Path
import gdown
import zipfile
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, Activation, Input, Add, MaxPooling2D, Flatten, Dense, Dropout
import tensorflow as tf
from deepface.commons import functions
#-------------------------------------
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
#url = 'https://drive.google.com/uc?id=1uRLtBCTQQAvHJ_KVrdbRJiCKxU8m5q2J'
if tf_version == 1:
from keras.models import Model
from keras.layers import (
Conv2D,
Activation,
Input,
Add,
MaxPooling2D,
Flatten,
Dense,
Dropout,
)
else:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Conv2D,
Activation,
Input,
Add,
MaxPooling2D,
Flatten,
Dense,
Dropout,
)
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5'):
# pylint: disable=line-too-long
# -------------------------------------
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
):
myInput = Input(shape=(55, 47, 3))
x = Conv2D(20, (4, 4), name='Conv1', activation='relu', input_shape=(55, 47, 3))(myInput)
x = MaxPooling2D(pool_size=2, strides=2, name='Pool1')(x)
x = Dropout(rate=0.99, name='D1')(x)
x = Conv2D(20, (4, 4), name="Conv1", activation="relu", input_shape=(55, 47, 3))(myInput)
x = MaxPooling2D(pool_size=2, strides=2, name="Pool1")(x)
x = Dropout(rate=0.99, name="D1")(x)
x = Conv2D(40, (3, 3), name='Conv2', activation='relu')(x)
x = MaxPooling2D(pool_size=2, strides=2, name='Pool2')(x)
x = Dropout(rate=0.99, name='D2')(x)
x = Conv2D(40, (3, 3), name="Conv2", activation="relu")(x)
x = MaxPooling2D(pool_size=2, strides=2, name="Pool2")(x)
x = Dropout(rate=0.99, name="D2")(x)
x = Conv2D(60, (3, 3), name='Conv3', activation='relu')(x)
x = MaxPooling2D(pool_size=2, strides=2, name='Pool3')(x)
x = Dropout(rate=0.99, name='D3')(x)
x = Conv2D(60, (3, 3), name="Conv3", activation="relu")(x)
x = MaxPooling2D(pool_size=2, strides=2, name="Pool3")(x)
x = Dropout(rate=0.99, name="D3")(x)
x1 = Flatten()(x)
fc11 = Dense(160, name = 'fc11')(x1)
fc11 = Dense(160, name="fc11")(x1)
x2 = Conv2D(80, (2, 2), name='Conv4', activation='relu')(x)
x2 = Conv2D(80, (2, 2), name="Conv4", activation="relu")(x)
x2 = Flatten()(x2)
fc12 = Dense(160, name = 'fc12')(x2)
fc12 = Dense(160, name="fc12")(x2)
y = Add()([fc11, fc12])
y = Activation('relu', name = 'deepid')(y)
y = Activation("relu", name="deepid")(y)
model = Model(inputs=[myInput], outputs=y)
#---------------------------------
# ---------------------------------
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/deepid_keras_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5") != True:
print("deepid_keras_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/deepid_keras_weights.h5'
output = home + "/.deepface/weights/deepid_keras_weights.h5"
gdown.download(url, output, quiet=False)
model.load_weights(home+'/.deepface/weights/deepid_keras_weights.h5')
model.load_weights(home + "/.deepface/weights/deepid_keras_weights.h5")
return model
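
DeepID fuses two branches: fc11 flattens the final pooled conv stack while fc12 taps an extra Conv4 branch, and their element-wise sum (after relu) forms the 160-d representation. A quick shape-check sketch; the random input is illustrative only:

import numpy as np
from deepface.basemodels import DeepID

model = DeepID.loadModel()
dummy = np.random.rand(1, 55, 47, 3).astype("float32")  # DeepID expects 55x47 crops
print(model.predict(dummy).shape)  # (1, 160) from Add()([fc11, fc12]) -> relu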


@ -1,60 +1,62 @@
import os
import zipfile
import bz2
import gdown
import numpy as np
from pathlib import Path
from deepface.commons import functions
class DlibResNet:
# pylint: disable=too-few-public-methods
class DlibResNet:
def __init__(self):
#this is not a must dependency
import dlib #19.20.0
# this is not a must dependency
import dlib # 19.20.0
self.layers = [DlibMetaData()]
#---------------------
# ---------------------
home = functions.get_deepface_home()
weight_file = home+'/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat'
weight_file = home + "/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat"
#---------------------
# ---------------------
#download pre-trained model if it does not exist
# download pre-trained model if it does not exist
if os.path.isfile(weight_file) != True:
print("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
url = "http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2"
output = home+'/.deepface/weights/'+url.split("/")[-1]
file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2"
url = f"http://dlib.net/files/{file_name}"
output = f"{home}/.deepface/weights/{file_name}"
gdown.download(url, output, quiet=False)
zipfile = bz2.BZ2File(output)
data = zipfile.read()
newfilepath = output[:-4] #discard .bz2 extension
open(newfilepath, 'wb').write(data)
newfilepath = output[:-4] # discard .bz2 extension
with open(newfilepath, "wb") as f:
f.write(data)
#---------------------
# ---------------------
model = dlib.face_recognition_model_v1(weight_file)
self.__model = model
#---------------------
# ---------------------
return None #classes must return None
# return None # classes must return None
def predict(self, img_aligned):
#functions.detectFace returns 4 dimensional images
# functions.detectFace returns 4 dimensional images
if len(img_aligned.shape) == 4:
img_aligned = img_aligned[0]
#functions.detectFace returns bgr images
img_aligned = img_aligned[:,:,::-1] #bgr to rgb
# functions.detectFace returns bgr images
img_aligned = img_aligned[:, :, ::-1] # bgr to rgb
#deepface.detectFace returns an array in scale of [0, 1] but dlib expects in scale of [0, 255]
# deepface.detectFace returns an array scaled to [0, 1]
# but dlib expects the [0, 255] scale
if img_aligned.max() <= 1:
img_aligned = img_aligned * 255
@ -65,10 +67,11 @@ class DlibResNet:
img_representation = model.compute_face_descriptor(img_aligned)
img_representation = np.array(img_representation)
img_representation = np.expand_dims(img_representation, axis = 0)
img_representation = np.expand_dims(img_representation, axis=0)
return img_representation
class DlibMetaData:
def __init__(self):
self.input_shape = [[1, 150, 150, 3]]


@ -1,4 +1,5 @@
from deepface.basemodels.DlibResNet import DlibResNet
def loadModel():
return DlibResNet()
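
The wrapper above hides dlib behind a Keras-like predict: per the code, it accepts a 4-d BGR batch, flips channels to RGB, and rescales [0, 1] inputs to [0, 255] before calling dlib. A sketch, with a random input standing in for a real aligned face:

import numpy as np
from deepface.basemodels.DlibResNet import DlibResNet

model = DlibResNet()  # downloads and unpacks the weights on first use
img = np.random.rand(1, 150, 150, 3)  # aligned 150x150 face, values in [0, 1]
representation = model.predict(img)
print(representation.shape)  # (1, 128) dlib face descriptor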

File diff suppressed because it is too large


@ -1,28 +1,29 @@
from deepface.basemodels import Facenet
from pathlib import Path
import os
import gdown
from deepface.basemodels import Facenet
from deepface.commons import functions
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5'):
model = Facenet.InceptionResNetV2(dimension = 512)
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
):
#-------------------------
model = Facenet.InceptionResNetV2(dimension=512)
# -------------------------
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/facenet512_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5") != True:
print("facenet512_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/facenet512_weights.h5'
output = home + "/.deepface/weights/facenet512_weights.h5"
gdown.download(url, output, quiet=False)
#-------------------------
# -------------------------
model.load_weights(home+'/.deepface/weights/facenet512_weights.h5')
model.load_weights(home + "/.deepface/weights/facenet512_weights.h5")
#-------------------------
# -------------------------
return model
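
Facenet512 is a thin variant: it reuses Facenet's InceptionResNetV2 graph and only changes the embedding size through the dimension argument. A shape-check sketch; the 160x160 input size is assumed from the Facenet base model, whose diff is suppressed above:

import numpy as np
from deepface.basemodels import Facenet512

model = Facenet512.loadModel()
dummy = np.random.rand(1, 160, 160, 3).astype("float32")  # assumed input size
print(model.predict(dummy).shape)  # (1, 512) instead of Facenet's (1, 128)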


@ -1,47 +1,75 @@
import os
from pathlib import Path
import gdown
import zipfile
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, LocallyConnected2D, MaxPooling2D, Flatten, Dense, Dropout
import gdown
import tensorflow as tf
from deepface.commons import functions
#-------------------------------------
# --------------------------------
# dependency configuration
def loadModel(url = 'https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'):
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import (
Convolution2D,
LocallyConnected2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
)
else:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
Convolution2D,
LocallyConnected2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
)
# -------------------------------------
# pylint: disable=line-too-long
def loadModel(
url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
):
base_model = Sequential()
base_model.add(Convolution2D(32, (11, 11), activation='relu', name='C1', input_shape=(152, 152, 3)))
base_model.add(MaxPooling2D(pool_size=3, strides=2, padding='same', name='M2'))
base_model.add(Convolution2D(16, (9, 9), activation='relu', name='C3'))
base_model.add(LocallyConnected2D(16, (9, 9), activation='relu', name='L4'))
base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation='relu', name='L5') )
base_model.add(LocallyConnected2D(16, (5, 5), activation='relu', name='L6'))
base_model.add(Flatten(name='F0'))
base_model.add(Dense(4096, activation='relu', name='F7'))
base_model.add(Dropout(rate=0.5, name='D0'))
base_model.add(Dense(8631, activation='softmax', name='F8'))
base_model.add(
Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
)
base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
base_model.add(Flatten(name="F0"))
base_model.add(Dense(4096, activation="relu", name="F7"))
base_model.add(Dropout(rate=0.5, name="D0"))
base_model.add(Dense(8631, activation="softmax", name="F8"))
#---------------------------------
# ---------------------------------
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5') != True:
if os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5") != True:
print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
output = home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'
output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
gdown.download(url, output, quiet=False)
#unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
with zipfile.ZipFile(output, 'r') as zip_ref:
zip_ref.extractall(home+'/.deepface/weights/')
# unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
with zipfile.ZipFile(output, "r") as zip_ref:
zip_ref.extractall(home + "/.deepface/weights/")
base_model.load_weights(home+'/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5')
base_model.load_weights(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5")
#drop F8 and D0. F7 is the representation layer.
# drop F8 and D0. F7 is the representation layer.
deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
return deepface_model
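
The comment above explains the head surgery: D0 and F8 only serve the 8631-class training objective, so slicing the graph at layers[-3] exposes F7's 4096-d activation as the face representation. A sketch, with the module path assumed from the deepface.basemodels layout:

import numpy as np
from deepface.basemodels import FbDeepFace

model = FbDeepFace.loadModel()
dummy = np.random.rand(1, 152, 152, 3).astype("float32")  # per the C1 input_shape
print(model.predict(dummy).shape)  # (1, 4096) representation from the F7 layer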


@ -1,251 +1,376 @@
import os
from pathlib import Path
import gdown
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from deepface.commons import functions
#---------------------------------------
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model
from keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
from keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from keras.layers import MaxPooling2D, AveragePooling2D
from keras import backend as K
else:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras import backend as K
#url = 'https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn'
# pylint: disable=unnecessary-lambda
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5'):
# ---------------------------------------
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
):
myInput = Input(shape=(96, 96, 3))
x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
x = Activation('relu')(x)
x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
x = Conv2D(64, (1, 1), name='conv2')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
x = Activation('relu')(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
x = Conv2D(64, (1, 1), name="conv2")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
x = Activation("relu")(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(192, (3, 3), name='conv3')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
x = Activation('relu')(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x) #x is equal added
x = Conv2D(192, (3, 3), name="conv3")(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
x = Activation("relu")(x)
x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(x) # x is equal added
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
# Inception3a
inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1")(
inception_3a_3x3
)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(inception_3a_3x3)
inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2")(
inception_3a_3x3
)
inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1")(
inception_3a_5x5
)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(inception_3a_5x5)
inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2")(
inception_3a_5x5
)
inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
inception_3a_pool = Activation('relu')(inception_3a_pool)
inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(inception_3a_pool)
inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_pool_bn")(
inception_3a_pool
)
inception_3a_pool = Activation("relu")(inception_3a_pool)
inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
inception_3a_1x1 = Activation('relu')(inception_3a_1x1)
inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_1x1_bn")(
inception_3a_1x1
)
inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)
inception_3a = concatenate(
[inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3
)
# Inception3b
inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1")(
inception_3b_3x3
)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(inception_3b_3x3)
inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2")(
inception_3b_3x3
)
inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1")(
inception_3b_5x5
)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2")(
inception_3b_5x5
)
inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
inception_3b_pool = Lambda(lambda x: x**2, name="power2_3b")(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x*9, name='mult9_3b')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_3b')(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
inception_3b_pool = Activation('relu')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(inception_3b_pool)
inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_pool_bn")(
inception_3b_pool
)
inception_3b_pool = Activation("relu")(inception_3b_pool)
inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
inception_3b_1x1 = Activation('relu')(inception_3b_1x1)
inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_1x1_bn")(
inception_3b_1x1
)
inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)
inception_3b = concatenate(
[inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3
)
# Inception3c
inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name='inception_3c_3x3_conv1')(inception_3b)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1")(
inception_3b
)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1")(
inception_3c_3x3
)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name='inception_3c_3x3_conv'+'2')(inception_3c_3x3)
inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn'+'2')(inception_3c_3x3)
inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2")(
inception_3c_3x3
)
inception_3c_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
)(inception_3c_3x3)
inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name='inception_3c_5x5_conv1')(inception_3b)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1")(
inception_3b
)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1")(
inception_3c_5x5
)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name='inception_3c_5x5_conv'+'2')(inception_3c_5x5)
inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn'+'2')(inception_3c_5x5)
inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2")(
inception_3c_5x5
)
inception_3c_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
)(inception_3c_5x5)
inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
#inception 4a
inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name='inception_4a_3x3_conv'+'1')(inception_3c)
inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'1')(inception_4a_3x3)
inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
# inception 4a
inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1")(
inception_3c
)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name='inception_4a_3x3_conv'+'2')(inception_4a_3x3)
inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'2')(inception_4a_3x3)
inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2")(
inception_4a_3x3
)
inception_4a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
)(inception_4a_3x3)
inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
inception_4a_5x5 = Conv2D(32, (1,1), strides=(1,1), name='inception_4a_5x5_conv1')(inception_3c)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
inception_4a_5x5 = ZeroPadding2D(padding=(2,2))(inception_4a_5x5)
inception_4a_5x5 = Conv2D(64, (5,5), strides=(1,1), name='inception_4a_5x5_conv'+'2')(inception_4a_5x5)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn'+'2')(inception_4a_5x5)
inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
inception_4a_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1")(
inception_3c
)
inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1")(
inception_4a_5x5
)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
inception_4a_5x5 = Conv2D(64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2")(
inception_4a_5x5
)
inception_4a_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
)(inception_4a_5x5)
inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
inception_4a_pool = Lambda(lambda x: x**2, name="power2_4a")(inception_3c)
inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: x*9, name='mult9_4a')(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_4a')(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
inception_4a_pool = Conv2D(128, (1,1), strides=(1,1), name='inception_4a_pool_conv'+'')(inception_4a_pool)
inception_4a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_pool_bn'+'')(inception_4a_pool)
inception_4a_pool = Activation('relu')(inception_4a_pool)
inception_4a_pool = Conv2D(128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + "")(
inception_4a_pool
)
inception_4a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
)(inception_4a_pool)
inception_4a_pool = Activation("relu")(inception_4a_pool)
inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name='inception_4a_1x1_conv'+'')(inception_3c)
inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_1x1_bn'+'')(inception_4a_1x1)
inception_4a_1x1 = Activation('relu')(inception_4a_1x1)
inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + "")(
inception_3c
)
inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + "")(
inception_4a_1x1
)
inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
inception_4a = concatenate([inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3)
inception_4a = concatenate(
[inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3
)
#inception4e
inception_4e_3x3 = Conv2D(160, (1,1), strides=(1,1), name='inception_4e_3x3_conv'+'1')(inception_4a)
inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'1')(inception_4e_3x3)
inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
# inception4e
inception_4e_3x3 = Conv2D(160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1")(
inception_4a
)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
inception_4e_3x3 = Conv2D(256, (3,3), strides=(2,2), name='inception_4e_3x3_conv'+'2')(inception_4e_3x3)
inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'2')(inception_4e_3x3)
inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
inception_4e_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2")(
inception_4e_3x3
)
inception_4e_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
)(inception_4e_3x3)
inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
inception_4e_5x5 = Conv2D(64, (1,1), strides=(1,1), name='inception_4e_5x5_conv'+'1')(inception_4a)
inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'1')(inception_4e_5x5)
inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
inception_4e_5x5 = Conv2D(64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1")(
inception_4a
)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
inception_4e_5x5 = Conv2D(128, (5,5), strides=(2,2), name='inception_4e_5x5_conv'+'2')(inception_4e_5x5)
inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'2')(inception_4e_5x5)
inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
inception_4e_5x5 = Conv2D(128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2")(
inception_4e_5x5
)
inception_4e_5x5 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
)(inception_4e_5x5)
inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
#inception5a
inception_5a_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_3x3_conv'+'1')(inception_4e)
inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'1')(inception_5a_3x3)
inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
# inception5a
inception_5a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1")(
inception_4e
)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
inception_5a_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5a_3x3_conv'+'2')(inception_5a_3x3)
inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'2')(inception_5a_3x3)
inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
inception_5a_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2")(
inception_5a_3x3
)
inception_5a_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
)(inception_5a_3x3)
inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
inception_5a_pool = Lambda(lambda x: x**2, name="power2_5a")(inception_4e)
inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: x*9, name='mult9_5a')(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_5a')(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
inception_5a_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_pool_conv'+'')(inception_5a_pool)
inception_5a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_pool_bn'+'')(inception_5a_pool)
inception_5a_pool = Activation('relu')(inception_5a_pool)
inception_5a_pool = ZeroPadding2D(padding=(1,1))(inception_5a_pool)
inception_5a_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + "")(
inception_5a_pool
)
inception_5a_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
)(inception_5a_pool)
inception_5a_pool = Activation("relu")(inception_5a_pool)
inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
inception_5a_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5a_1x1_conv'+'')(inception_4e)
inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_1x1_bn'+'')(inception_5a_1x1)
inception_5a_1x1 = Activation('relu')(inception_5a_1x1)
inception_5a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + "")(
inception_4e
)
inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + "")(
inception_5a_1x1
)
inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
#inception_5b
inception_5b_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_3x3_conv'+'1')(inception_5a)
inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'1')(inception_5b_3x3)
inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
inception_5b_3x3 = ZeroPadding2D(padding=(1,1))(inception_5b_3x3)
inception_5b_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5b_3x3_conv'+'2')(inception_5b_3x3)
inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'2')(inception_5b_3x3)
inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
# inception_5b
inception_5b_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1")(
inception_5a
)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
inception_5b_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2")(
inception_5b_3x3
)
inception_5b_3x3 = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
)(inception_5b_3x3)
inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
inception_5b_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_pool_conv'+'')(inception_5b_pool)
inception_5b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_pool_bn'+'')(inception_5b_pool)
inception_5b_pool = Activation('relu')(inception_5b_pool)
inception_5b_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + "")(
inception_5b_pool
)
inception_5b_pool = BatchNormalization(
axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
)(inception_5b_pool)
inception_5b_pool = Activation("relu")(inception_5b_pool)
inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
inception_5b_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5b_1x1_conv'+'')(inception_5a)
inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_1x1_bn'+'')(inception_5b_1x1)
inception_5b_1x1 = Activation('relu')(inception_5b_1x1)
inception_5b_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + "")(
inception_5a
)
inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + "")(
inception_5b_1x1
)
inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
reshape_layer = Flatten()(av_pool)
dense_layer = Dense(128, name='dense_layer')(reshape_layer)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)
dense_layer = Dense(128, name="dense_layer")(reshape_layer)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(dense_layer)
# Final Model
model = Model(inputs=[myInput], outputs=norm_layer)
#-----------------------------------
# -----------------------------------
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/openface_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/openface_weights.h5") != True:
print("openface_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/openface_weights.h5'
output = home + "/.deepface/weights/openface_weights.h5"
gdown.download(url, output, quiet=False)
#-----------------------------------
# -----------------------------------
model.load_weights(home+'/.deepface/weights/openface_weights.h5')
model.load_weights(home + "/.deepface/weights/openface_weights.h5")
#-----------------------------------
# -----------------------------------
return model
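
OpenFace ends with an explicit l2-normalization Lambda, so every embedding lands on the unit hypersphere; that is easy to verify with a random input (illustrative only, module path assumed from the deepface.basemodels layout):

import numpy as np
from deepface.basemodels import OpenFace

model = OpenFace.loadModel()
dummy = np.random.rand(1, 96, 96, 3).astype("float32")
embedding = model.predict(dummy)
print(embedding.shape, np.linalg.norm(embedding))  # (1, 128), norm ~= 1.0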


@ -5,25 +5,28 @@ import gdown
from deepface.commons import functions
# pylint: disable=line-too-long, too-few-public-methods
class _Layer:
input_shape = (None, 112, 112, 3)
output_shape = (None, 1, 128)
class SFaceModel:
class SFaceModel:
def __init__(self, model_path):
self.model = cv.FaceRecognizerSF.create(
model = model_path,
config = "",
backend_id = 0,
target_id = 0)
model=model_path, config="", backend_id=0, target_id=0
)
self.layers = [_Layer()]
def predict(self, image):
# Preprocess
input_blob = (image[0] * 255).astype(np.uint8) # revert the image to original format and preprocess using the model
input_blob = (image[0] * 255).astype(
np.uint8
) # revert the image to original format and preprocess using the model
# Forward
embeddings = self.model.feature(input_blob)
@ -31,11 +34,13 @@ class SFaceModel:
return embeddings
def load_model(url = "https://github.com/opencv/opencv_zoo/raw/master/models/face_recognition_sface/face_recognition_sface_2021dec.onnx"):
def load_model(
url="https://github.com/opencv/opencv_zoo/raw/master/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
):
home = functions.get_deepface_home()
file_name = home + '/.deepface/weights/face_recognition_sface_2021dec.onnx'
file_name = home + "/.deepface/weights/face_recognition_sface_2021dec.onnx"
if not os.path.isfile(file_name):
@ -43,6 +48,6 @@ def load_model(url = "https://github.com/opencv/opencv_zoo/raw/master/models/fac
gdown.download(url, file_name, quiet=False)
model = SFaceModel(model_path = file_name)
model = SFaceModel(model_path=file_name)
return model
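
SFace is the one backend here that bypasses Keras entirely: predict converts the [0, 1] float batch back to uint8 and delegates to OpenCV's FaceRecognizerSF. A sketch; the random input is illustrative, the 112x112 size comes from _Layer.input_shape, and the module path is assumed:

import numpy as np
from deepface.basemodels.SFace import load_model

model = load_model()  # fetches the .onnx file into ~/.deepface/weights
img = np.random.rand(1, 112, 112, 3)  # floats in [0, 1], as the wrapper expects
embedding = model.predict(img)
print(embedding.shape)  # (1, 128) per _Layer.output_shape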


@ -1,92 +1,110 @@
import os
from pathlib import Path
import gdown
import tensorflow as tf
from deepface.commons import functions
import tensorflow as tf
# ---------------------------------------
tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from keras.layers import (
Convolution2D,
ZeroPadding2D,
MaxPooling2D,
Flatten,
Dropout,
Activation,
)
else:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from tensorflow.keras.layers import (
Convolution2D,
ZeroPadding2D,
MaxPooling2D,
Flatten,
Dropout,
Activation,
)
# ---------------------------------------
#---------------------------------------
def baseModel():
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(224,224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Convolution2D(4096, (7, 7), activation="relu"))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Convolution2D(4096, (1, 1), activation="relu"))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
model.add(Activation("softmax"))
return model
#url = 'https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo'
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5'):
# url = 'https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo'
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
):
model = baseModel()
#-----------------------------------
# -----------------------------------
home = functions.get_deepface_home()
output = home+'/.deepface/weights/vgg_face_weights.h5'
output = home + "/.deepface/weights/vgg_face_weights.h5"
if os.path.isfile(output) != True:
print("vgg_face_weights.h5 will be downloaded...")
gdown.download(url, output, quiet=False)
#-----------------------------------
# -----------------------------------
model.load_weights(output)
#-----------------------------------
# -----------------------------------
#TO-DO: why?
# TO-DO: why?
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
return vgg_face_descriptor
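
The TO-DO likely resolves to the same head-surgery idea used in FbDeepFace: layers[-2] is the Flatten just before the softmax, so the descriptor is the 2622-d pre-softmax vector rather than class probabilities. A sketch, module path assumed:

import numpy as np
from deepface.basemodels import VGGFace

model = VGGFace.loadModel()
dummy = np.random.rand(1, 224, 224, 3).astype("float32")  # per the input_shape
print(model.predict(dummy).shape)  # (1, 2622) pre-softmax descriptor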


@ -1,16 +1,18 @@
import numpy as np
def findCosineDistance(source_representation, test_representation):
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
if type(source_representation) == list:
if isinstance(source_representation, list):
source_representation = np.array(source_representation)
if type(test_representation) == list:
if isinstance(test_representation, list):
test_representation = np.array(test_representation)
euclidean_distance = source_representation - test_representation
@ -18,24 +20,25 @@ def findEuclideanDistance(source_representation, test_representation):
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
def l2_normalize(x):
return x / np.sqrt(np.sum(np.multiply(x, x)))
def findThreshold(model_name, distance_metric):
base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}
base_threshold = {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75}
thresholds = {
'VGG-Face': {'cosine': 0.40, 'euclidean': 0.60, 'euclidean_l2': 0.86},
'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80},
'Facenet512': {'cosine': 0.30, 'euclidean': 23.56, 'euclidean_l2': 1.04},
'ArcFace': {'cosine': 0.68, 'euclidean': 4.15, 'euclidean_l2': 1.13},
'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.4},
'SFace': {'cosine': 0.5932763306134152, 'euclidean': 10.734038121282206, 'euclidean_l2': 1.055836701022614},
'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55},
'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64},
'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17}
"VGG-Face": {"cosine": 0.40, "euclidean": 0.60, "euclidean_l2": 0.86},
"Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80},
"Facenet512": {"cosine": 0.30, "euclidean": 23.56, "euclidean_l2": 1.04},
"ArcFace": {"cosine": 0.68, "euclidean": 4.15, "euclidean_l2": 1.13},
"Dlib": {"cosine": 0.07, "euclidean": 0.6, "euclidean_l2": 0.4},
"SFace": {"cosine": 0.593, "euclidean": 10.734, "euclidean_l2": 1.055},
"OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55},
"DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64},
"DeepID": {"cosine": 0.015, "euclidean": 45, "euclidean_l2": 0.17},
}
threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
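
Putting the two pieces together, verification reduces to one comparison: compute a distance between two embeddings and accept the pair when it falls under the model/metric threshold from the table above. A worked sketch with toy vectors:

import numpy as np
from deepface.commons import distance as dst

alpha = np.array([0.1, 0.2, 0.3])
beta = np.array([0.1, 0.25, 0.28])

d = dst.findCosineDistance(alpha, beta)
threshold = dst.findThreshold("VGG-Face", "cosine")  # 0.40 per the table
print(round(d, 4), d <= threshold)  # small distance -> same-identity verdict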


@ -1,56 +1,63 @@
import os
import numpy as np
import pandas as pd
import cv2
import base64
from pathlib import Path
from PIL import Image
import requests
# 3rd party dependencies
import numpy as np
import cv2
import tensorflow as tf
# package dependencies
from deepface.detectors import FaceDetector
import tensorflow as tf
# --------------------------------------------------
# configurations of dependencies
tf_version = tf.__version__
tf_major_version = int(tf_version.split(".")[0])
tf_major_version = int(tf_version.split(".", maxsplit=1)[0])
tf_minor_version = int(tf_version.split(".")[1])
if tf_major_version == 1:
import keras
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
elif tf_major_version == 2:
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.preprocessing import image
#--------------------------------------------------
# --------------------------------------------------
def initialize_folder():
home = get_deepface_home()
if not os.path.exists(home+"/.deepface"):
os.makedirs(home+"/.deepface")
if not os.path.exists(home + "/.deepface"):
os.makedirs(home + "/.deepface")
print("Directory ", home, "/.deepface created")
if not os.path.exists(home+"/.deepface/weights"):
os.makedirs(home+"/.deepface/weights")
if not os.path.exists(home + "/.deepface/weights"):
os.makedirs(home + "/.deepface/weights")
print("Directory ", home, "/.deepface/weights created")
def get_deepface_home():
return str(os.getenv('DEEPFACE_HOME', default=Path.home()))
#--------------------------------------------------
def get_deepface_home():
return str(os.getenv("DEEPFACE_HOME", default=str(Path.home())))
# --------------------------------------------------
def loadBase64Img(uri):
encoded_data = uri.split(',')[1]
encoded_data = uri.split(",")[1]
nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
def load_image(img):
exact_image = False; base64_img = False; url_img = False
exact_image = False
base64_img = False
url_img = False
if type(img).__module__ == np.__name__:
exact_image = True
@ -61,49 +68,62 @@ def load_image(img):
elif len(img) > 11 and img.startswith("http"):
url_img = True
#---------------------------
# ---------------------------
if base64_img == True:
if base64_img is True:
img = loadBase64Img(img)
elif url_img:
img = np.array(Image.open(requests.get(img, stream=True).raw).convert('RGB'))
elif url_img is True:
img = np.array(Image.open(requests.get(img, stream=True).raw).convert("RGB"))
elif exact_image != True: #image path passed as input
if os.path.isfile(img) != True:
raise ValueError("Confirm that ",img," exists")
elif exact_image is not True: # image path passed as input
if os.path.isfile(img) is not True:
raise ValueError(f"Confirm that {img} exists")
img = cv2.imread(img)
return img
#--------------------------------------------------
def extract_faces(img, target_size=(224, 224), detector_backend = 'opencv', grayscale = False, enforce_detection = True, align = True):
# --------------------------------------------------
def extract_faces(
img,
target_size=(224, 224),
detector_backend="opencv",
grayscale=False,
enforce_detection=True,
align=True,
):
# this is going to store a list of img itself (numpy), its region and confidence
extracted_faces = []
#img might be path, base64 or numpy array. Convert it to numpy whatever it is.
# img might be path, base64 or numpy array. Convert it to numpy whatever it is.
img = load_image(img)
img_region = [0, 0, img.shape[1], img.shape[0]]
if detector_backend == 'skip':
if detector_backend == "skip":
face_objs = [(img, img_region, 0)]
else:
face_detector = FaceDetector.build_model(detector_backend)
face_objs = FaceDetector.detect_faces(face_detector, detector_backend, img, align)
# in case of no face found
if len(face_objs) == 0 and enforce_detection == True:
raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")
elif len(face_objs) == 0 and enforce_detection == False:
if len(face_objs) == 0 and enforce_detection is True:
raise ValueError(
"Face could not be detected. Please confirm that the picture is a face photo "
+ "or consider to set enforce_detection param to False."
)
if len(face_objs) == 0 and enforce_detection is False:
face_objs = [(img, img_region, 0)]
for current_img, current_region, confidence in face_objs:
if current_img.shape[0] > 0 and current_img.shape[1] > 0:
if grayscale == True:
if grayscale is True:
current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
# resize and padding
@ -117,78 +137,102 @@ def extract_faces(img, target_size=(224, 224), detector_backend = 'opencv', gray
diff_0 = target_size[0] - current_img.shape[0]
diff_1 = target_size[1] - current_img.shape[1]
if grayscale == False:
if grayscale is False:
# Put the base image in the middle of the padded image
current_img = np.pad(current_img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
(0, 0),
),
"constant",
)
else:
current_img = np.pad(current_img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')
current_img = np.pad(
current_img,
((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)),
"constant",
)
#double check: if target image is not still the same size with target.
# double check: if target image is not still the same size as target.
if current_img.shape[0:2] != target_size:
current_img = cv2.resize(current_img, target_size)
#normalizing the image pixels
img_pixels = image.img_to_array(current_img) #what this line doing? must?
img_pixels = np.expand_dims(img_pixels, axis = 0)
img_pixels /= 255 #normalize input in [0, 1]
# normalizing the image pixels
img_pixels = image.img_to_array(current_img)  # what is this line doing? is it necessary?
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255 # normalize input in [0, 1]
#int cast is for the exception - object of type 'float32' is not JSON serializable
region_obj = {"x": int(current_region[0]), "y": int(current_region[1]), "w": int(current_region[2]), "h": int(current_region[3])}
# int cast is for the exception - object of type 'float32' is not JSON serializable
region_obj = {
"x": int(current_region[0]),
"y": int(current_region[1]),
"w": int(current_region[2]),
"h": int(current_region[3]),
}
extracted_face = [img_pixels, region_obj, confidence]
extracted_faces.append(extracted_face)
if len(extracted_faces) == 0 and enforce_detection == True:
raise ValueError("Detected face shape is ", img.shape,". Consider to set enforce_detection argument to False.")
raise ValueError(
"Detected face shape is ",
img.shape,
". Consider to set enforce_detection argument to False.",
)
return extracted_faces
def normalize_input(img, normalization = 'base'):
#issue 131 declares that some normalization techniques improves the accuracy
def normalize_input(img, normalization="base"):
if normalization == 'base':
# issue 131 declares that some normalization techniques improve the accuracy
if normalization == "base":
return img
else:
#@trevorgribble and @davedgd contributed this feature
img *= 255 #restore input in scale of [0, 255] because it was normalized in scale of [0, 1] in preprocess_face
# @trevorgribble and @davedgd contributed this feature
# restore input in scale of [0, 255] because it was normalized in scale of
# [0, 1] in preprocess_face
img *= 255
if normalization == 'raw':
pass #return just restored pixels
if normalization == "raw":
pass # return just restored pixels
elif normalization == 'Facenet':
elif normalization == "Facenet":
mean, std = img.mean(), img.std()
img = (img - mean) / std
elif(normalization=="Facenet2018"):
elif normalization == "Facenet2018":
# simply / 127.5 - 1 (similar to facenet 2018 model preprocessing step as @iamrishab posted)
img /= 127.5
img -= 1
elif normalization == 'VGGFace':
elif normalization == "VGGFace":
# mean subtraction based on VGGFace1 training data
img[..., 0] -= 93.5940
img[..., 1] -= 104.7624
img[..., 2] -= 129.1863
elif(normalization == 'VGGFace2'):
elif normalization == "VGGFace2":
# mean subtraction based on VGGFace2 training data
img[..., 0] -= 91.4953
img[..., 1] -= 103.8827
img[..., 2] -= 131.0912
elif(normalization == 'ArcFace'):
#Reference study: The faces are cropped and resized to 112×112,
#and each pixel (ranged between [0, 255]) in RGB images is normalised
#by subtracting 127.5 then divided by 128.
elif normalization == "ArcFace":
# Reference study: The faces are cropped and resized to 112×112,
# and each pixel (ranged between [0, 255]) in RGB images is normalised
# by subtracting 127.5 then divided by 128.
img -= 127.5
img /= 128
#-----------------------------
else:
raise ValueError(f"unimplemented normalization type - {normalization}")
return img
def find_target_size(model_name):
target_sizes = {
@ -200,10 +244,12 @@ def find_target_size(model_name):
"DeepID": (55, 47),
"Dlib": (150, 150),
"ArcFace": (112, 112),
"SFace": (112, 112)
"SFace": (112, 112),
}
if model_name not in target_sizes.keys():
target_size = target_sizes.get(model_name)
if target_size is None:
raise ValueError(f"unimplemented model name - {model_name}")
return target_sizes[model_name]
return target_size
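A usage sketch for the renamed extract_faces helper; the image path is illustrative, not from the commit:

from deepface.commons import functions

face_objs = functions.extract_faces(
    img="dataset/img1.jpg",  # may also be a base64 string or a numpy array
    target_size=(224, 224),
    detector_backend="opencv",
)
img_pixels, region_obj, confidence = face_objs[0]
print(region_obj)  # e.g. {"x": 345, "y": 211, "w": 769, "h": 769}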

File diff suppressed because it is too large

View File

@ -1,47 +1,47 @@
from pathlib import Path
import gdown
import bz2
import os
import bz2
import gdown
from deepface.commons import functions
def build_model():
home = functions.get_deepface_home()
import dlib #this requirement is not a must that's why imported here
import dlib # this requirement is not a must that's why imported here
#check required file exists in the home/.deepface/weights folder
if os.path.isfile(home+'/.deepface/weights/shape_predictor_5_face_landmarks.dat') != True:
# check required file exists in the home/.deepface/weights folder
if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True:
print("shape_predictor_5_face_landmarks.dat.bz2 is going to be downloaded")
file_name = "shape_predictor_5_face_landmarks.dat.bz2"
print(f"{file_name} is going to be downloaded")
url = "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
output = home+'/.deepface/weights/'+url.split("/")[-1]
url = f"http://dlib.net/files/{file_name}"
output = f"{home}/.deepface/weights/{file_name}"
gdown.download(url, output, quiet=False)
zipfile = bz2.BZ2File(output)
data = zipfile.read()
newfilepath = output[:-4] #discard .bz2 extension
open(newfilepath, 'wb').write(data)
newfilepath = output[:-4] # discard .bz2 extension
with open(newfilepath, "wb") as f:
f.write(data)
face_detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")
sp = dlib.shape_predictor(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat")
detector = {}
detector["face_detector"] = face_detector
detector["sp"] = sp
return detector
def detect_face(detector, img, align = True):
import dlib #this requirement is not a must that's why imported here
def detect_face(detector, img, align=True):
import dlib # this requirement is not a must that's why imported here
resp = []
home = str(Path.home())
sp = detector["sp"]
detected_face = None
@ -50,26 +50,29 @@ def detect_face(detector, img, align = True):
face_detector = detector["face_detector"]
#note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
# note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
detections, scores, _ = face_detector.run(img, 1)
if len(detections) > 0:
for idx, d in enumerate(detections):
left = d.left(); right = d.right()
top = d.top(); bottom = d.bottom()
left = d.left()
right = d.right()
top = d.top()
bottom = d.bottom()
#detected_face = img[top:bottom, left:right]
detected_face = img[max(0, top): min(bottom, img.shape[0]), max(0, left): min(right, img.shape[1])]
# detected_face = img[top:bottom, left:right]
detected_face = img[
max(0, top) : min(bottom, img.shape[0]), max(0, left) : min(right, img.shape[1])
]
img_region = [left, top, right - left, bottom - top]
confidence = scores[idx]
if align:
img_shape = sp(img, detections[idx])
detected_face = dlib.get_face_chip(img, img_shape, size = detected_face.shape[0])
detected_face = dlib.get_face_chip(img, img_shape, size=detected_face.shape[0])
resp.append((detected_face, img_region, confidence))
return resp
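A minimal sketch of driving this wrapper directly; the image path is assumed:

import cv2
from deepface.detectors import DlibWrapper

detector = DlibWrapper.build_model()  # downloads the 5-point shape predictor once
img = cv2.imread("dataset/img1.jpg")
for detected_face, img_region, confidence in DlibWrapper.detect_face(detector, img, align=True):
    print(img_region, confidence)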

View File

@ -1,106 +1,116 @@
from deepface.detectors import OpenCvWrapper, SsdWrapper, DlibWrapper, MtcnnWrapper, RetinaFaceWrapper,MediapipeWrapper
from PIL import Image
import math
from PIL import Image
import numpy as np
from deepface.commons import distance
from deepface.detectors import (
OpenCvWrapper,
SsdWrapper,
DlibWrapper,
MtcnnWrapper,
RetinaFaceWrapper,
MediapipeWrapper,
)
def build_model(detector_backend):
global face_detector_obj #singleton design pattern
global face_detector_obj # singleton design pattern
backends = {
'opencv': OpenCvWrapper.build_model,
'ssd': SsdWrapper.build_model,
'dlib': DlibWrapper.build_model,
'mtcnn': MtcnnWrapper.build_model,
'retinaface': RetinaFaceWrapper.build_model,
'mediapipe': MediapipeWrapper.build_model
"opencv": OpenCvWrapper.build_model,
"ssd": SsdWrapper.build_model,
"dlib": DlibWrapper.build_model,
"mtcnn": MtcnnWrapper.build_model,
"retinaface": RetinaFaceWrapper.build_model,
"mediapipe": MediapipeWrapper.build_model,
}
if not "face_detector_obj" in globals():
face_detector_obj = {}
if not detector_backend in face_detector_obj.keys():
built_models = list(face_detector_obj.keys())
if detector_backend not in built_models:
face_detector = backends.get(detector_backend)
if face_detector:
face_detector = face_detector()
face_detector_obj[detector_backend] = face_detector
#print(detector_backend," built")
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
return face_detector_obj[detector_backend]
def detect_face(face_detector, detector_backend, img, align = True):
def detect_face(face_detector, detector_backend, img, align=True):
obj = detect_faces(face_detector, detector_backend, img, align)
if len(obj) > 0:
face, region, confidence = obj[0] #discard multiple faces
else: #len(obj) == 0
face, region, confidence = obj[0] # discard multiple faces
else: # len(obj) == 0
face = None
region = [0, 0, img.shape[1], img.shape[0]]
confidence = 0  # set a default so the name is bound when no face is found
return face, region, confidence
def detect_faces(face_detector, detector_backend, img, align = True):
def detect_faces(face_detector, detector_backend, img, align=True):
backends = {
'opencv': OpenCvWrapper.detect_face,
'ssd': SsdWrapper.detect_face,
'dlib': DlibWrapper.detect_face,
'mtcnn': MtcnnWrapper.detect_face,
'retinaface': RetinaFaceWrapper.detect_face,
'mediapipe': MediapipeWrapper.detect_face
"opencv": OpenCvWrapper.detect_face,
"ssd": SsdWrapper.detect_face,
"dlib": DlibWrapper.detect_face,
"mtcnn": MtcnnWrapper.detect_face,
"retinaface": RetinaFaceWrapper.detect_face,
"mediapipe": MediapipeWrapper.detect_face,
}
detect_face = backends.get(detector_backend)
if detect_face:
obj = detect_face(face_detector, img, align)
#obj stores list of (detected_face, region, confidence)
detect_face_fn = backends.get(detector_backend)
if detect_face_fn: # pylint: disable=no-else-return
obj = detect_face_fn(face_detector, img, align)
# obj stores list of (detected_face, region, confidence)
return obj
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
def alignment_procedure(img, left_eye, right_eye):
#this function aligns given face in img based on left and right eye coordinates
# this function aligns given face in img based on left and right eye coordinates
left_eye_x, left_eye_y = left_eye
right_eye_x, right_eye_y = right_eye
#-----------------------
#find rotation direction
# -----------------------
# find rotation direction
if left_eye_y > right_eye_y:
point_3rd = (right_eye_x, left_eye_y)
direction = -1 #rotate same direction to clock
direction = -1 # rotate same direction to clock
else:
point_3rd = (left_eye_x, right_eye_y)
direction = 1 #rotate inverse direction of clock
direction = 1 # rotate inverse direction of clock
#-----------------------
#find length of triangle edges
# -----------------------
# find length of triangle edges
a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd))
b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd))
c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye))
#-----------------------
# -----------------------
#apply cosine rule
# apply cosine rule
if b != 0 and c != 0: #this multiplication causes division by zero in cos_a calculation
if b != 0 and c != 0: # this multiplication causes division by zero in cos_a calculation
cos_a = (b*b + c*c - a*a)/(2*b*c)
angle = np.arccos(cos_a) #angle in radian
angle = (angle * 180) / math.pi #radian to degree
cos_a = (b * b + c * c - a * a) / (2 * b * c)
angle = np.arccos(cos_a) # angle in radian
angle = (angle * 180) / math.pi # radian to degree
#-----------------------
#rotate base image
# -----------------------
# rotate base image
if direction == -1:
angle = 90 - angle
@ -108,6 +118,6 @@ def alignment_procedure(img, left_eye, right_eye):
img = Image.fromarray(img)
img = np.array(img.rotate(direction * angle))
#-----------------------
# -----------------------
return img #return img anyway
return img # return img anyway
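The two functions above are usually reached through the generic entry points; a sketch, with an assumed image path:

import cv2
from deepface.detectors import FaceDetector

face_detector = FaceDetector.build_model("opencv")  # cached in the module-level singleton
img = cv2.imread("dataset/img1.jpg")
for face, region, confidence in FaceDetector.detect_faces(face_detector, "opencv", img, align=True):
    print(region, confidence)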

View File

@ -1,26 +1,28 @@
from deepface.detectors import FaceDetector
# Link - https://google.github.io/mediapipe/solutions/face_detection
def build_model():
import mediapipe as mp #this is not a must dependency. do not import it in the global level.
import mediapipe as mp # this is not a must dependency. do not import it in the global level.
mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection( min_detection_confidence=0.7)
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
return face_detection
def detect_face(face_detector, img, align = True):
import mediapipe as mp #this is not a must dependency. do not import it in the global level.
def detect_face(face_detector, img, align=True):
resp = []
img_width = img.shape[1]; img_height = img.shape[0]
img_width = img.shape[1]
img_height = img.shape[0]
results = face_detector.process(img)
if results.detections:
for detection in results.detections:
confidence, = detection.score
(confidence,) = detection.score
bounding_box = detection.location_data.relative_bounding_box
landmarks = detection.location_data.relative_keypoints
@ -32,17 +34,19 @@ def detect_face(face_detector, img, align = True):
right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
#nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
#mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
#right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
#left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
# nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
# mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
# right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
# left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
if x > 0 and y > 0:
detected_face = img[y:y+h, x:x+w]
detected_face = img[y : y + h, x : x + w]
img_region = [x, y, w, h]
if align:
detected_face = FaceDetector.alignment_procedure(detected_face, left_eye, right_eye)
detected_face = FaceDetector.alignment_procedure(
detected_face, left_eye, right_eye
)
resp.append((detected_face, img_region, confidence))
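Mediapipe reports the bounding box and keypoints in relative [0, 1] coordinates, which is why the wrapper multiplies by the image size; a standalone sketch of that conversion with synthetic values:

img_width, img_height = 640, 480
xmin, ymin, width, height = 0.25, 0.30, 0.20, 0.25  # relative_bounding_box fields
x, y = int(xmin * img_width), int(ymin * img_height)      # (160, 144)
w, h = int(width * img_width), int(height * img_height)   # (128, 120)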

View File

@ -1,26 +1,29 @@
import cv2
from deepface.detectors import FaceDetector
def build_model():
from mtcnn import MTCNN
face_detector = MTCNN()
return face_detector
def detect_face(face_detector, img, align = True):
def detect_face(face_detector, img, align=True):
resp = []
detected_face = None
img_region = [0, 0, img.shape[1], img.shape[0]]
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR
detections = face_detector.detect_faces(img_rgb)
if len(detections) > 0:
for detection in detections:
x, y, w, h = detection["box"]
detected_face = img[int(y):int(y+h), int(x):int(x+w)]
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
img_region = [x, y, w, h]
confidence = detection["confidence"]
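A sketch of this wrapper in isolation; note the BGR-to-RGB conversion happens inside detect_face, so a plain cv2.imread result is the expected input (path assumed):

import cv2
from deepface.detectors import MtcnnWrapper

face_detector = MtcnnWrapper.build_model()
img = cv2.imread("dataset/img1.jpg")  # BGR, as OpenCV reads it
faces = MtcnnWrapper.detect_face(face_detector, img, align=True)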

View File

@ -1,42 +1,44 @@
import cv2
import os
import pandas as pd
import cv2
from deepface.detectors import FaceDetector
def build_model():
detector = {}
detector["face_detector"] = build_cascade("haarcascade")
detector["eye_detector"] = build_cascade("haarcascade_eye")
return detector
detector ={}
detector["face_detector"] = build_cascade('haarcascade')
detector["eye_detector"] = build_cascade('haarcascade_eye')
def build_cascade(model_name="haarcascade"):
opencv_path = get_opencv_path()
if model_name == "haarcascade":
face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
face_detector_path,
" violated.",
)
detector = cv2.CascadeClassifier(face_detector_path)
elif model_name == "haarcascade_eye":
eye_detector_path = opencv_path + "haarcascade_eye.xml"
if os.path.isfile(eye_detector_path) != True:
raise ValueError(
"Confirm that opencv is installed on your environment! Expected path ",
eye_detector_path,
" violated.",
)
detector = cv2.CascadeClassifier(eye_detector_path)
else:
raise ValueError(f"unimplemented model_name for build_cascade - {model_name}")
return detector
def build_cascade(model_name = 'haarcascade'):
opencv_path = get_opencv_path()
if model_name == 'haarcascade':
face_detector_path = opencv_path+"haarcascade_frontalface_default.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
face_detector = cv2.CascadeClassifier(face_detector_path)
return face_detector
elif model_name == 'haarcascade_eye':
eye_detector_path = opencv_path+"haarcascade_eye.xml"
if os.path.isfile(eye_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",eye_detector_path," violated.")
eye_detector = cv2.CascadeClassifier(eye_detector_path)
return eye_detector
def detect_face(detector, img, align = True):
def detect_face(detector, img, align=True):
resp = []
detected_face = None
@ -44,17 +46,19 @@ def detect_face(detector, img, align = True):
faces = []
try:
#faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
# faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
#note that, by design, opencv's haarcascade scores are >0 but not capped at 1
faces, _, scores = detector["face_detector"].detectMultiScale3(img, 1.1, 10, outputRejectLevels = True)
# note that, by design, opencv's haarcascade scores are >0 but not capped at 1
faces, _, scores = detector["face_detector"].detectMultiScale3(
img, 1.1, 10, outputRejectLevels=True
)
except:
pass
if len(faces) > 0:
for (x,y,w,h), confidence in zip(faces, scores):
detected_face = img[int(y):int(y+h), int(x):int(x+w)]
for (x, y, w, h), confidence in zip(faces, scores):
detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
if align:
detected_face = align_face(detector["eye_detector"], detected_face)
@ -65,41 +69,48 @@ def detect_face(detector, img, align = True):
return resp
def align_face(eye_detector, img):
detected_face_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #eye detector expects gray scale image
detected_face_gray = cv2.cvtColor(
img, cv2.COLOR_BGR2GRAY
) # eye detector expects gray scale image
#eyes = eye_detector.detectMultiScale(detected_face_gray, 1.3, 5)
# eyes = eye_detector.detectMultiScale(detected_face_gray, 1.3, 5)
eyes = eye_detector.detectMultiScale(detected_face_gray, 1.1, 10)
#----------------------------------------------------------------
# ----------------------------------------------------------------
#opencv eye detectin module is not strong. it might find more than 2 eyes!
#besides, it returns eyes with different order in each call (issue 435)
#this is an important issue because opencv is the default detector and ssd also uses this
#find the largest 2 eye. Thanks to @thelostpeace
# opencv eye detection module is not strong. it might find more than 2 eyes!
# besides, it returns eyes with different order in each call (issue 435)
# this is an important issue because opencv is the default detector and ssd also uses this
# find the largest 2 eye. Thanks to @thelostpeace
eyes = sorted(eyes, key = lambda v: abs((v[0] - v[2]) * (v[1] - v[3])), reverse=True)
eyes = sorted(eyes, key=lambda v: abs((v[0] - v[2]) * (v[1] - v[3])), reverse=True)
#----------------------------------------------------------------
# ----------------------------------------------------------------
if len(eyes) >= 2:
#decide left and right eye
# decide left and right eye
eye_1 = eyes[0]; eye_2 = eyes[1]
eye_1 = eyes[0]
eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1; right_eye = eye_2
left_eye = eye_1
right_eye = eye_2
else:
left_eye = eye_2; right_eye = eye_1
left_eye = eye_2
right_eye = eye_1
#-----------------------
#find center of eyes
# -----------------------
# find center of eyes
left_eye = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
right_eye = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
right_eye = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))
img = FaceDetector.alignment_procedure(img, left_eye, right_eye)
return img #return img anyway
return img # return img anyway
def get_opencv_path():
opencv_home = cv2.__file__
@ -109,4 +120,4 @@ def get_opencv_path():
for folder in folders[1:]:
path = path + "/" + folder
return path+"/data/"
return path + "/data/"

View File

@ -1,42 +1,25 @@
#from retinaface import RetinaFace #this is not a must dependency
import cv2
def build_model():
from retinaface import RetinaFace
from retinaface import RetinaFace # this is not a must dependency
face_detector = RetinaFace.build_model()
return face_detector
def detect_face(face_detector, img, align = True):
from retinaface import RetinaFace
def detect_face(face_detector, img, align=True):
from retinaface import RetinaFace # this is not a must dependency
from retinaface.commons import postprocess
#---------------------------------
# ---------------------------------
resp = []
# The BGR2RGB conversion will be done in the preprocessing step of retinaface.
# img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #retinaface expects RGB but OpenCV read BGR
# --------------------------
"""
face = None
img_region = [0, 0, img.shape[1], img.shape[0]] #Really?
obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)
faces = RetinaFace.extract_faces(img_rgb, model = face_detector, align = align)
if len(faces) > 0:
face = faces[0][:, :, ::-1]
return face, img_region
"""
#--------------------------
obj = RetinaFace.detect_faces(img, model = face_detector, threshold = 0.9)
if type(obj) == dict:
for key in obj:
identity = obj[key]
if isinstance(obj, dict):
for _, identity in obj.items():  # items() yields (key, value) pairs; keep only the value
facial_area = identity["facial_area"]
y = facial_area[1]
@ -46,18 +29,20 @@ def detect_face(face_detector, img, align = True):
img_region = [x, y, w, h]
confidence = identity["score"]
#detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
detected_face = img[facial_area[1]: facial_area[3], facial_area[0]: facial_area[2]]
# detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
detected_face = img[facial_area[1] : facial_area[3], facial_area[0] : facial_area[2]]
if align:
landmarks = identity["landmarks"]
left_eye = landmarks["left_eye"]
right_eye = landmarks["right_eye"]
nose = landmarks["nose"]
#mouth_right = landmarks["mouth_right"]
#mouth_left = landmarks["mouth_left"]
# mouth_right = landmarks["mouth_right"]
# mouth_left = landmarks["mouth_left"]
detected_face = postprocess.alignment_procedure(detected_face, right_eye, left_eye, nose)
detected_face = postprocess.alignment_procedure(
detected_face, right_eye, left_eye, nose
)
resp.append((detected_face, img_region, confidence))
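One detail worth spelling out: RetinaFace.detect_faces returns a dict keyed by face_1, face_2, ..., so iterating obj.items() yields (key, value) pairs and the loop above unpacks them accordingly. A tiny sketch with synthetic output:

obj = {
    "face_1": {"score": 0.99, "facial_area": [155, 81, 434, 443]},
}
for _, identity in obj.items():  # identity is the per-face dict
    print(identity["facial_area"], identity["score"])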

View File

@ -1,41 +1,42 @@
import gdown
from pathlib import Path
import os
import gdown
import cv2
import pandas as pd
from deepface.detectors import OpenCvWrapper
from deepface.commons import functions
# pylint: disable=line-too-long
def build_model():
home = functions.get_deepface_home()
#model structure
if os.path.isfile(home+'/.deepface/weights/deploy.prototxt') != True:
# model structure
if os.path.isfile(home + "/.deepface/weights/deploy.prototxt") != True:
print("deploy.prototxt will be downloaded...")
url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
output = home+'/.deepface/weights/deploy.prototxt'
output = home + "/.deepface/weights/deploy.prototxt"
gdown.download(url, output, quiet=False)
#pre-trained weights
if os.path.isfile(home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel') != True:
# pre-trained weights
if os.path.isfile(home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel") != True:
print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")
url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
output = home+'/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel'
output = home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
gdown.download(url, output, quiet=False)
face_detector = cv2.dnn.readNetFromCaffe(
home+"/.deepface/weights/deploy.prototxt",
home+"/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
home + "/.deepface/weights/deploy.prototxt",
home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel",
)
eye_detector = OpenCvWrapper.build_cascade("haarcascade_eye")
@ -46,7 +47,8 @@ def build_model():
return detector
def detect_face(detector, img, align = True):
def detect_face(detector, img, align=True):
resp = []
@ -57,42 +59,50 @@ def detect_face(detector, img, align = True):
target_size = (300, 300)
base_img = img.copy() #we will restore base_img to img later
base_img = img.copy() # we will restore base_img to img later
original_size = img.shape
img = cv2.resize(img, target_size)
aspect_ratio_x = (original_size[1] / target_size[1])
aspect_ratio_y = (original_size[0] / target_size[0])
aspect_ratio_x = original_size[1] / target_size[1]
aspect_ratio_y = original_size[0] / target_size[0]
imageBlob = cv2.dnn.blobFromImage(image = img)
imageBlob = cv2.dnn.blobFromImage(image=img)
face_detector = detector["face_detector"]
face_detector.setInput(imageBlob)
detections = face_detector.forward()
detections_df = pd.DataFrame(detections[0][0], columns = ssd_labels)
detections_df = pd.DataFrame(detections[0][0], columns=ssd_labels)
detections_df = detections_df[detections_df['is_face'] == 1] #0: background, 1: face
detections_df = detections_df[detections_df['confidence'] >= 0.90]
detections_df = detections_df[detections_df["is_face"] == 1] # 0: background, 1: face
detections_df = detections_df[detections_df["confidence"] >= 0.90]
detections_df['left'] = (detections_df['left'] * 300).astype(int)
detections_df['bottom'] = (detections_df['bottom'] * 300).astype(int)
detections_df['right'] = (detections_df['right'] * 300).astype(int)
detections_df['top'] = (detections_df['top'] * 300).astype(int)
detections_df["left"] = (detections_df["left"] * 300).astype(int)
detections_df["bottom"] = (detections_df["bottom"] * 300).astype(int)
detections_df["right"] = (detections_df["right"] * 300).astype(int)
detections_df["top"] = (detections_df["top"] * 300).astype(int)
if detections_df.shape[0] > 0:
for index, instance in detections_df.iterrows():
for _, instance in detections_df.iterrows():
left = instance["left"]
right = instance["right"]
bottom = instance["bottom"]
top = instance["top"]
detected_face = base_img[int(top*aspect_ratio_y):int(bottom*aspect_ratio_y), int(left*aspect_ratio_x):int(right*aspect_ratio_x)]
img_region = [int(left*aspect_ratio_x), int(top*aspect_ratio_y), int(right*aspect_ratio_x) - int(left*aspect_ratio_x), int(bottom*aspect_ratio_y) - int(top*aspect_ratio_y)]
detected_face = base_img[
int(top * aspect_ratio_y) : int(bottom * aspect_ratio_y),
int(left * aspect_ratio_x) : int(right * aspect_ratio_x),
]
img_region = [
int(left * aspect_ratio_x),
int(top * aspect_ratio_y),
int(right * aspect_ratio_x) - int(left * aspect_ratio_x),
int(bottom * aspect_ratio_y) - int(top * aspect_ratio_y),
]
confidence = instance["confidence"]
if align:
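Since the SSD input is resized to 300x300, detections must be scaled back to the source resolution; the arithmetic above, isolated with synthetic numbers:

original_size = (480, 640)  # (height, width) of the source image
aspect_ratio_x = original_size[1] / 300  # ~2.133
aspect_ratio_y = original_size[0] / 300  # 1.6
left, top, right, bottom = 60, 90, 150, 210  # a box in 300x300 space
x = int(left * aspect_ratio_x)       # 128
y = int(top * aspect_ratio_y)        # 144
w = int(right * aspect_ratio_x) - x  # 320 - 128 = 192
h = int(bottom * aspect_ratio_y) - y # 336 - 144 = 192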

View File

@ -1,60 +1,63 @@
from deepface.basemodels import VGGFace
import os
from pathlib import Path
import gdown
import numpy as np
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions
import tensorflow as tf
tf_version = int(tf.__version__.split(".")[0])
# ----------------------------------------
# dependency configurations
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
import keras
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
#url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'
# ----------------------------------------
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5'):
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
):
model = VGGFace.baseModel()
#--------------------------
# --------------------------
classes = 101
base_model_output = Sequential()
base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
base_model_output = Flatten()(base_model_output)
base_model_output = Activation('softmax')(base_model_output)
base_model_output = Activation("softmax")(base_model_output)
#--------------------------
# --------------------------
age_model = Model(inputs=model.input, outputs=base_model_output)
#--------------------------
# --------------------------
#load weights
# load weights
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/age_model_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/age_model_weights.h5") != True:
print("age_model_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/age_model_weights.h5'
output = home + "/.deepface/weights/age_model_weights.h5"
gdown.download(url, output, quiet=False)
age_model.load_weights(home+'/.deepface/weights/age_model_weights.h5')
age_model.load_weights(home + "/.deepface/weights/age_model_weights.h5")
return age_model
#--------------------------
# --------------------------
def findApparentAge(age_predictions):
output_indexes = np.array([i for i in range(0, 101)])
output_indexes = np.array(list(range(0, 101)))
apparent_age = np.sum(age_predictions * output_indexes)
return apparent_age
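findApparentAge computes the expectation over the 101 age classes rather than taking the argmax; a worked sketch with a synthetic distribution:

import numpy as np
from deepface.extendedmodels import Age

age_predictions = np.zeros(101)
age_predictions[29:32] = [0.2, 0.6, 0.2]  # synthetic softmax peak around 30
print(Age.findApparentAge(age_predictions))  # 0.2*29 + 0.6*30 + 0.2*31 = 30.0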

View File

@ -1,74 +1,72 @@
import os
import gdown
from pathlib import Path
import zipfile
import tensorflow as tf
from deepface.commons import functions
import tensorflow as tf
tf_version = int(tf.__version__.split(".")[0])
# -------------------------------------------
# pylint: disable=line-too-long
# -------------------------------------------
# dependency configuration
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
import keras
from keras.models import Model, Sequential
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
elif tf_version == 2:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
AveragePooling2D,
Flatten,
Dense,
Dropout,
)
# -------------------------------------------
#url = 'https://drive.google.com/uc?id=13iUHHP3SlNg53qSuQZDdHDSDNdBP9nwy'
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5'):
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
):
num_classes = 7
model = Sequential()
#1st convolution layer
model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48,48,1)))
model.add(MaxPooling2D(pool_size=(5,5), strides=(2, 2)))
# 1st convolution layer
model.add(Conv2D(64, (5, 5), activation="relu", input_shape=(48, 48, 1)))
model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
#2nd convolution layer
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
# 2nd convolution layer
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
#3rd convolution layer
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
# 3rd convolution layer
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Flatten())
#fully connected neural networks
model.add(Dense(1024, activation='relu'))
# fully connected neural networks
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.add(Dense(num_classes, activation="softmax"))
#----------------------------
# ----------------------------
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/facial_expression_model_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/facial_expression_model_weights.h5") != True:
print("facial_expression_model_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/facial_expression_model_weights.h5'
output = home + "/.deepface/weights/facial_expression_model_weights.h5"
gdown.download(url, output, quiet=False)
"""
#google drive source downloads zip
output = home+'/.deepface/weights/facial_expression_model_weights.zip'
gdown.download(url, output, quiet=False)
#unzip facial_expression_model_weights.zip
with zipfile.ZipFile(output, 'r') as zip_ref:
zip_ref.extractall(home+'/.deepface/weights/')
"""
model.load_weights(home+'/.deepface/weights/facial_expression_model_weights.h5')
model.load_weights(home + "/.deepface/weights/facial_expression_model_weights.h5")
return model
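A sketch of loading the model and checking its expected input, which the architecture above fixes at 48x48 grayscale:

from deepface.extendedmodels import Emotion

model = Emotion.loadModel()  # fetches facial_expression_model_weights.h5 on first call
print(model.input_shape)     # (None, 48, 48, 1)
print(model.output_shape)    # (None, 7) - seven emotion classes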

View File

@ -1,12 +1,14 @@
from deepface.basemodels import VGGFace
import os
from pathlib import Path
import gdown
import numpy as np
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions
import tensorflow as tf
# -------------------------------------
# pylint: disable=line-too-long
# -------------------------------------
# dependency configurations
tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
@ -15,40 +17,39 @@ if tf_version == 1:
elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
#url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5'):
# -------------------------------------
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
):
model = VGGFace.baseModel()
#--------------------------
# --------------------------
classes = 2
base_model_output = Sequential()
base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
base_model_output = Flatten()(base_model_output)
base_model_output = Activation('softmax')(base_model_output)
base_model_output = Activation("softmax")(base_model_output)
#--------------------------
# --------------------------
gender_model = Model(inputs=model.input, outputs=base_model_output)
#--------------------------
# --------------------------
#load weights
# load weights
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/gender_model_weights.h5') != True:
if os.path.isfile(home + "/.deepface/weights/gender_model_weights.h5") != True:
print("gender_model_weights.h5 will be downloaded...")
output = home+'/.deepface/weights/gender_model_weights.h5'
output = home + "/.deepface/weights/gender_model_weights.h5"
gdown.download(url, output, quiet=False)
gender_model.load_weights(home+'/.deepface/weights/gender_model_weights.h5')
gender_model.load_weights(home + "/.deepface/weights/gender_model_weights.h5")
return gender_model
#--------------------------
# --------------------------

View File

@ -1,15 +1,14 @@
from deepface.basemodels import VGGFace
import os
from pathlib import Path
import gdown
import numpy as np
import zipfile
import tensorflow as tf
from deepface.basemodels import VGGFace
from deepface.commons import functions
import tensorflow as tf
tf_version = int(tf.__version__.split(".")[0])
# --------------------------
# pylint: disable=line-too-long
# --------------------------
# dependency configurations
tf_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_version == 1:
from keras.models import Model, Sequential
@ -17,49 +16,37 @@ if tf_version == 1:
elif tf_version == 2:
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
#url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5'):
# --------------------------
def loadModel(
url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
):
model = VGGFace.baseModel()
#--------------------------
# --------------------------
classes = 6
base_model_output = Sequential()
base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
base_model_output = Flatten()(base_model_output)
base_model_output = Activation('softmax')(base_model_output)
base_model_output = Activation("softmax")(base_model_output)
#--------------------------
# --------------------------
race_model = Model(inputs=model.input, outputs=base_model_output)
#--------------------------
# --------------------------
#load weights
# load weights
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/race_model_single_batch.h5') != True:
if os.path.isfile(home + "/.deepface/weights/race_model_single_batch.h5") != True:
print("race_model_single_batch.h5 will be downloaded...")
output = home+'/.deepface/weights/race_model_single_batch.h5'
output = home + "/.deepface/weights/race_model_single_batch.h5"
gdown.download(url, output, quiet=False)
"""
#google drive source downloads zip
output = home+'/.deepface/weights/race_model_single_batch.zip'
gdown.download(url, output, quiet=False)
#unzip race_model_single_batch.zip
with zipfile.ZipFile(output, 'r') as zip_ref:
zip_ref.extractall(home+'/.deepface/weights/')
"""
race_model.load_weights(home+'/.deepface/weights/race_model_single_batch.h5')
race_model.load_weights(home + "/.deepface/weights/race_model_single_batch.h5")
return race_model
#--------------------------

View File

@ -1,266 +0,0 @@
import pandas as pd
import numpy as np
import itertools
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc, roc_auc_score
import matplotlib.pyplot as plt
from tqdm import tqdm
tqdm.pandas()
#--------------------------
#Data set
# Ref: https://github.com/serengil/deepface/tree/master/tests/dataset
idendities = {
"Angelina": ["img1.jpg", "img2.jpg", "img4.jpg", "img5.jpg", "img6.jpg", "img7.jpg", "img10.jpg", "img11.jpg"],
"Scarlett": ["img8.jpg", "img9.jpg", "img47.jpg", "img48.jpg", "img49.jpg", "img50.jpg", "img51.jpg"],
"Jennifer": ["img3.jpg", "img12.jpg", "img53.jpg", "img54.jpg", "img55.jpg", "img56.jpg"],
"Mark": ["img13.jpg", "img14.jpg", "img15.jpg", "img57.jpg", "img58.jpg"],
"Jack": ["img16.jpg", "img17.jpg", "img59.jpg", "img61.jpg", "img62.jpg"],
"Elon": ["img18.jpg", "img19.jpg", "img67.jpg"],
"Jeff": ["img20.jpg", "img21.jpg"],
"Marissa": ["img22.jpg", "img23.jpg"],
"Sundar": ["img24.jpg", "img25.jpg"],
"Katy": ["img26.jpg", "img27.jpg", "img28.jpg", "img42.jpg", "img43.jpg", "img44.jpg", "img45.jpg", "img46.jpg"],
"Matt": ["img29.jpg", "img30.jpg", "img31.jpg", "img32.jpg", "img33.jpg"],
"Leonardo": ["img34.jpg", "img35.jpg", "img36.jpg", "img37.jpg"],
"George": ["img38.jpg", "img39.jpg", "img40.jpg", "img41.jpg"]
}
#--------------------------
#Positives
positives = []
for key, values in idendities.items():
#print(key)
for i in range(0, len(values)-1):
for j in range(i+1, len(values)):
#print(values[i], " and ", values[j])
positive = []
positive.append(values[i])
positive.append(values[j])
positives.append(positive)
positives = pd.DataFrame(positives, columns = ["file_x", "file_y"])
positives["decision"] = "Yes"
print(positives.shape)
#--------------------------
#Negatives
samples_list = list(idendities.values())
negatives = []
for i in range(0, len(idendities) - 1):
for j in range(i+1, len(idendities)):
#print(samples_list[i], " vs ",samples_list[j])
cross_product = itertools.product(samples_list[i], samples_list[j])
cross_product = list(cross_product)
#print(cross_product)
for cross_sample in cross_product:
#print(cross_sample[0], " vs ", cross_sample[1])
negative = []
negative.append(cross_sample[0])
negative.append(cross_sample[1])
negatives.append(negative)
negatives = pd.DataFrame(negatives, columns = ["file_x", "file_y"])
negatives["decision"] = "No"
negatives = negatives.sample(positives.shape[0])
print(negatives.shape)
#--------------------------
#Merge positive and negative ones
df = pd.concat([positives, negatives]).reset_index(drop = True)
print(df.decision.value_counts())
df.file_x = "dataset/"+df.file_x
df.file_y = "dataset/"+df.file_y
#--------------------------
#DeepFace
from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
instances = df[["file_x", "file_y"]].values.tolist()
models = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
metrics = ['cosine', 'euclidean_l2']
if True:
pretrained_models = {}
pretrained_models["VGG-Face"] = VGGFace.loadModel()
print("VGG-Face loaded")
pretrained_models["Facenet"] = Facenet.loadModel()
print("Facenet loaded")
pretrained_models["OpenFace"] = OpenFace.loadModel()
print("OpenFace loaded")
pretrained_models["DeepFace"] = FbDeepFace.loadModel()
print("FbDeepFace loaded")
for model in models:
for metric in metrics:
resp_obj = DeepFace.verify(instances
, model_name = model
, model = pretrained_models[model]
, distance_metric = metric
, enforce_detection = False)
distances = []
for i in range(0, len(instances)):
distance = round(resp_obj["pair_%s" % (i+1)]["distance"], 4)
distances.append(distance)
df['%s_%s' % (model, metric)] = distances
df.to_csv("face-recognition-pivot.csv", index = False)
else:
df = pd.read_csv("face-recognition-pivot.csv")
df_raw = df.copy()
#--------------------------
#Distribution
fig = plt.figure(figsize=(15, 15))
figure_idx = 1
for model in models:
for metric in metrics:
feature = '%s_%s' % (model, metric)
ax1 = fig.add_subplot(len(models) * len(metrics), len(metrics), figure_idx)
df[df.decision == "Yes"][feature].plot(kind='kde', title = feature, label = 'Yes', legend = True)
df[df.decision == "No"][feature].plot(kind='kde', title = feature, label = 'No', legend = True)
figure_idx = figure_idx + 1
plt.show()
#--------------------------
#Pre-processing for modelling
columns = []
for model in models:
for metric in metrics:
feature = '%s_%s' % (model, metric)
columns.append(feature)
columns.append("decision")
df = df[columns]
df.loc[df[df.decision == 'Yes'].index, 'decision'] = 1
df.loc[df[df.decision == 'No'].index, 'decision'] = 0
print(df.head())
#--------------------------
#Train test split
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, test_size=0.30, random_state=17)
target_name = "decision"
y_train = df_train[target_name].values
x_train = df_train.drop(columns=[target_name]).values
y_test = df_test[target_name].values
x_test = df_test.drop(columns=[target_name]).values
#print("target distribution:")
#print(df_train[target_name].value_counts())
#print(df_test[target_name].value_counts())
#--------------------------
#LightGBM
import lightgbm as lgb
features = df.drop(columns=[target_name]).columns.tolist()
lgb_train = lgb.Dataset(x_train, y_train, feature_name = features)
lgb_test = lgb.Dataset(x_test, y_test, feature_name = features)
params = {
'task': 'train'
, 'boosting_type': 'gbdt'
, 'objective': 'multiclass'
, 'num_class': 2
, 'metric': 'multi_logloss'
}
gbm = lgb.train(params, lgb_train, num_boost_round=500, early_stopping_rounds = 50, valid_sets=lgb_test)
gbm.save_model("face-recognition-ensemble-model.txt")
#--------------------------
#Evaluation
predictions = gbm.predict(x_test)
prediction_classes = []
for prediction in predictions:
prediction_class = np.argmax(prediction)
prediction_classes.append(prediction_class)
y_test = list(y_test)
cm = confusion_matrix(y_test, prediction_classes)
print(cm)
tn, fp, fn, tp = cm.ravel()
recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn)/(tn + fp + fn + tp)
f1 = 2 * (precision * recall) / (precision + recall)
print("Precision: ", 100*precision,"%")
print("Recall: ", 100*recall,"%")
print("F1 score ",100*f1, "%")
print("Accuracy: ", 100*accuracy,"%")
#--------------------------
#Interpretability
ax = lgb.plot_importance(gbm, max_num_features=20)
plt.show()
"""
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
plt.rcParams["figure.figsize"] = [20, 20]
for i in range(0, gbm.num_trees()):
ax = lgb.plot_tree(gbm, tree_index = i)
plt.show()
if i == 2:
break
"""
#--------------------------
#ROC Curve
y_pred_proba = predictions[::,1]
fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
auc = roc_auc_score(y_test, y_pred_proba)
plt.figure(figsize=(7,3))
plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
#--------------------------

View File

@ -1,50 +1,47 @@
#!pip install deepface
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.commons import functions
import matplotlib.pyplot as plt
import numpy as np
from deepface.basemodels import VGGFace
from deepface.commons import functions
#----------------------------------------------
#build face recognition model
# ----------------------------------------------
# build face recognition model
model = VGGFace.loadModel()
#model = Facenet.loadModel()
#model = OpenFace.loadModel()
#model = FbDeepFace.loadModel()
try:
input_shape = model.layers[0].input_shape[1:3]
except: #issue 470
except: # issue 470
input_shape = model.layers[0].input_shape[0][1:3]
print("model input shape: ", model.layers[0].input_shape[1:])
print("model output shape: ", model.layers[-1].input_shape[-1])
#----------------------------------------------
#load images and find embeddings
# ----------------------------------------------
# load images and find embeddings
#img1 = functions.detectFace("dataset/img1.jpg", input_shape)
# img1 = functions.detectFace("dataset/img1.jpg", input_shape)
img1 = functions.preprocess_face("dataset/img1.jpg", input_shape)
img1_representation = model.predict(img1)[0,:]
img1_representation = model.predict(img1)[0, :]
#img2 = functions.detectFace("dataset/img3.jpg", input_shape)
# img2 = functions.detectFace("dataset/img3.jpg", input_shape)
img2 = functions.preprocess_face("dataset/img3.jpg", input_shape)
img2_representation = model.predict(img2)[0,:]
img2_representation = model.predict(img2)[0, :]
#----------------------------------------------
#distance between two images
# ----------------------------------------------
# distance between two images
distance_vector = np.square(img1_representation - img2_representation)
#print(distance_vector)
# print(distance_vector)
distance = np.sqrt(distance_vector.sum())
print("Euclidean distance: ",distance)
print("Euclidean distance: ", distance)
#----------------------------------------------
#expand vectors to be shown better in graph
# ----------------------------------------------
# expand vectors to be shown better in graph
img1_graph = []; img2_graph = []; distance_graph = []
img1_graph = []
img2_graph = []
distance_graph = []
for i in range(0, 200):
img1_graph.append(img1_representation)
@ -55,35 +52,35 @@ img1_graph = np.array(img1_graph)
img2_graph = np.array(img2_graph)
distance_graph = np.array(distance_graph)
#----------------------------------------------
#plotting
# ----------------------------------------------
# plotting
fig = plt.figure()
ax1 = fig.add_subplot(3,2,1)
plt.imshow(img1[0][:,:,::-1])
plt.axis('off')
ax1 = fig.add_subplot(3, 2, 1)
plt.imshow(img1[0][:, :, ::-1])
plt.axis("off")
ax2 = fig.add_subplot(3,2,2)
im = plt.imshow(img1_graph, interpolation='nearest', cmap=plt.cm.ocean)
ax2 = fig.add_subplot(3, 2, 2)
im = plt.imshow(img1_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()
ax3 = fig.add_subplot(3,2,3)
plt.imshow(img2[0][:,:,::-1])
plt.axis('off')
ax3 = fig.add_subplot(3, 2, 3)
plt.imshow(img2[0][:, :, ::-1])
plt.axis("off")
ax4 = fig.add_subplot(3,2,4)
im = plt.imshow(img2_graph, interpolation='nearest', cmap=plt.cm.ocean)
ax4 = fig.add_subplot(3, 2, 4)
im = plt.imshow(img2_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()
ax5 = fig.add_subplot(3,2,5)
plt.text(0.35, 0, "Distance: %s" % (distance))
plt.axis('off')
ax5 = fig.add_subplot(3, 2, 5)
plt.text(0.35, 0, f"Distance: {distance}")
plt.axis("off")
ax6 = fig.add_subplot(3,2,6)
im = plt.imshow(distance_graph, interpolation='nearest', cmap=plt.cm.ocean)
ax6 = fig.add_subplot(3, 2, 6)
im = plt.imshow(distance_graph, interpolation="nearest", cmap=plt.cm.ocean)
plt.colorbar()
plt.show()
#----------------------------------------------
# ----------------------------------------------
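The distance computed in this script is the plain Euclidean norm of the embedding difference; the same quantity in two lines, with synthetic vectors standing in for model.predict outputs:

import numpy as np

img1_representation = np.random.rand(2622)
img2_representation = np.random.rand(2622)
distance = np.sqrt(np.square(img1_representation - img2_representation).sum())
# equivalent: np.linalg.norm(img1_representation - img2_representation)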

View File

@ -9,12 +9,13 @@ from deepface import DeepFace
print("-----------------------------------------")
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf_major_version = int(tf.__version__.split(".")[0])
tf_major_version = int(tf.__version__.split(".", maxsplit=1)[0])
if tf_major_version == 2:
import logging
tf.get_logger().setLevel(logging.ERROR)
print("Running unit tests for TF ", tf.__version__)
@ -22,39 +23,42 @@ print("Running unit tests for TF ", tf.__version__)
print("-----------------------------------------")
expected_coverage = 97
num_cases = 0; succeed_cases = 0
num_cases = 0
succeed_cases = 0
def evaluate(condition):
global num_cases, succeed_cases
if condition == True:
if condition is True:
succeed_cases += 1
num_cases += 1
# ------------------------------------------------
detectors = ['opencv', 'mtcnn']
models = ['VGG-Face', 'Facenet', 'ArcFace']
metrics = ['cosine', 'euclidean', 'euclidean_l2']
detectors = ["opencv", "mtcnn"]
models = ["VGG-Face", "Facenet", "ArcFace"]
metrics = ["cosine", "euclidean", "euclidean_l2"]
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True],
['dataset/img6.jpg', 'dataset/img7.jpg', True],
['dataset/img8.jpg', 'dataset/img9.jpg', True],
['dataset/img1.jpg', 'dataset/img11.jpg', True],
['dataset/img2.jpg', 'dataset/img11.jpg', True],
['dataset/img1.jpg', 'dataset/img3.jpg', False],
['dataset/img2.jpg', 'dataset/img3.jpg', False],
['dataset/img6.jpg', 'dataset/img8.jpg', False],
['dataset/img6.jpg', 'dataset/img9.jpg', False],
["dataset/img1.jpg", "dataset/img2.jpg", True],
["dataset/img5.jpg", "dataset/img6.jpg", True],
["dataset/img6.jpg", "dataset/img7.jpg", True],
["dataset/img8.jpg", "dataset/img9.jpg", True],
["dataset/img1.jpg", "dataset/img11.jpg", True],
["dataset/img2.jpg", "dataset/img11.jpg", True],
["dataset/img1.jpg", "dataset/img3.jpg", False],
["dataset/img2.jpg", "dataset/img3.jpg", False],
["dataset/img6.jpg", "dataset/img8.jpg", False],
["dataset/img6.jpg", "dataset/img9.jpg", False],
]
print("-----------------------------------------")
def test_cases():
print("Enforce detection test")
@@ -88,7 +92,7 @@ def test_cases():
assert "w" in objs[0]["facial_area"].keys()
assert "h" in objs[0]["facial_area"].keys()
assert isinstance(objs[0]["embedding"], list)
assert len(objs[0]["embedding"]) == 2622 #embedding of VGG-Face
assert len(objs[0]["embedding"]) == 2622 # embedding of VGG-Face
except:
exception_thrown = True
@@ -121,7 +125,7 @@ def test_cases():
print("Extract faces test")
for detector in detectors:
img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend = detector)
img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
for img_obj in img_objs:
assert "face" in img_obj.keys()
assert "facial_area" in img_obj.keys()
@@ -134,7 +138,7 @@ def test_cases():
img = img_obj["face"]
evaluate(img.shape[0] > 0 and img.shape[1] > 0)
print(detector," test is done")
print(detector, " test is done")
print("-----------------------------------------")
@@ -151,7 +155,7 @@ def test_cases():
for detector in detectors:
print(detector + " detector")
res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend = detector)
res = DeepFace.verify(dataset[0][0], dataset[0][1], detector_backend=detector)
assert isinstance(res, dict)
assert "verified" in res.keys()
@@ -180,7 +184,7 @@ def test_cases():
print("Find function test")
dfs = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset")
dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
for df in dfs:
assert isinstance(df, pd.DataFrame)
print(df.head())
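
DeepFace.find returns one pandas DataFrame per matching pass; a short hedged sketch (the "identity" column name is an assumption about the DataFrame layout, which the diff only inspects via df.head()):

# Sketch under assumptions: column names are not visible in this diff.
dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
for df in dfs:
    print(df["identity"].head())  # assumed column holding matched image paths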
@@ -200,11 +204,11 @@ def test_cases():
print("-----------------------------------------")
print("Facial analysis test. Passing all to the action")
demography_objs = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])
demography_objs = DeepFace.analyze(img, ["age", "gender", "race", "emotion"])
for demography in demography_objs:
#print(f"Demography: {demography}")
#check response is a valid json
# print(f"Demography: {demography}")
# check response is a valid json
print("Age: ", demography["age"])
print("Gender: ", demography["dominant_gender"])
print("Race: ", demography["dominant_race"])
@@ -218,7 +222,7 @@ def test_cases():
print("-----------------------------------------")
print("Facial analysis test 2. Remove some actions and check they are not computed")
demography_objs = DeepFace.analyze(img, ['age', 'gender'])
demography_objs = DeepFace.analyze(img, ["age", "gender"])
for demography in demography_objs:
print("Age: ", demography.get("age"))
@@ -242,9 +246,7 @@ def test_cases():
img2 = instance[1]
result = instance[2]
resp_obj = DeepFace.verify(img1, img2
, model_name = model
, distance_metric = metric)
resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)
prediction = resp_obj["verified"]
distance = round(resp_obj["distance"], 2)
@@ -264,7 +266,23 @@ def test_cases():
else:
classified_label = "unverified"
print(img1.split("/")[-1], "-", img2.split("/")[-1], classified_label, "as same person based on", model,"and",metric,". Distance:",distance,", Threshold:", threshold,"(",test_result_label,")")
print(
img1.split("/", maxsplit=1)[-1],
"-",
img2.split("/", maxsplit=1)[-1],
classified_label,
"as same person based on",
model,
"and",
metric,
". Distance:",
distance,
", Threshold:",
threshold,
"(",
test_result_label,
")",
)
print("--------------------------")
@@ -297,7 +315,7 @@ def test_cases():
img1 = cv2.imread("dataset/img1.jpg")
dfs = DeepFace.find(img1, db_path = "dataset")
dfs = DeepFace.find(img1, db_path="dataset")
for df in dfs:
print(df.head())
@@ -307,23 +325,29 @@ def test_cases():
print("non-binary gender tests")
#interface validation - no need to call evaluate here
# interface validation - no need to call evaluate here
for img1_path, img2_path, verified in dataset:
for img1_path, _, _ in dataset:
for detector in detectors:
results = DeepFace.analyze(img1_path, actions=('gender',), detector_backend=detector, enforce_detection=False)
results = DeepFace.analyze(
img1_path, actions=("gender",), detector_backend=detector, enforce_detection=False
)
for result in results:
print(result)
assert 'gender' in result.keys()
assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]
assert "gender" in result.keys()
assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
"Man",
"Woman",
]
if result["dominant_gender"] == "Man":
assert result["gender"]["Man"] > result["gender"]["Woman"]
else:
assert result["gender"]["Man"] < result["gender"]["Woman"]
# ---------------------------------------------
test_cases()
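
The num_cases and succeed_cases counters that evaluate() maintains presumably feed a final check against expected_coverage; that check sits outside the hunks above, so the following is a hypothetical sketch rather than the file's actual code:

# Hypothetical: reconstructs the likely coverage gate, not shown in this diff.
coverage = (100 * succeed_cases) / num_cases
print(f"test coverage: {coverage}")
assert coverage > expected_coverage, "test coverage is not sufficient"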