mirror of https://github.com/serengil/deepface.git (synced 2025-06-07 12:05:22 +00:00)
add web app (incomplete)
This commit is contained in:
parent f789a9c8ce
commit 11a92a9b27
3  .env  Normal file
@@ -0,0 +1,3 @@
export HAP_DB_NAME=hap
export HAP_DB_HOST=localhost
export HAP_DB_PORT=27017
23  app.py  Normal file
@@ -0,0 +1,23 @@
from config import create_app, db
from views import public_route_bp
from controller import api_bp_new, api_bp_old


app = create_app()
"""
register blueprints
"""
app.register_blueprint(public_route_bp)
app.register_blueprint(api_bp_old)
app.register_blueprint(api_bp_new, url_prefix='/api')

app.app_context().push()


def main():
    app.run(debug=True, host="0.0.0.0", port=5000)  # set debug=False for a production environment
    # app.run(debug=True, host="localhost", port=8888)


if __name__ == "__main__":
    main()
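Because predict_apis and general_apis are registered on both the old root-level blueprint and the new /api blueprint (see controller/__init__.py below), each of those endpoints is reachable at two paths. A minimal smoke test, assuming the missing views module is in place and the server is running locally on the default port from main():

import requests

# the same resource is served by the old root-level API and the new /api prefix
old = requests.get("http://localhost:5000/get-app-info")
new = requests.get("http://localhost:5000/api/get-app-info")
assert old.status_code == new.status_code == 200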
50  config.py  Normal file
@@ -0,0 +1,50 @@
from flask import Flask
from flask_mongoengine import MongoEngine
from deepface.DeepFaceLite import DeepFaceLite
# import tensorflow as tf
# from tensorflow.python.keras.backend import set_session
import os

"""
initialize the ML engine
"""
db = MongoEngine()
# sess = tf.Session()
# graph = tf.get_default_graph()
# set_session(sess)
deepface = DeepFaceLite()

"""
database environment variables
"""
DB_NAME = os.environ.get('HAP_DB_NAME')
DB_HOST = os.environ.get('HAP_DB_HOST')
DB_PORT = os.environ.get('HAP_DB_PORT')
DB_USERNAME = os.environ.get('HAP_DB_USERNAME')
DB_PASSWORD = os.environ.get('HAP_DB_PASSWORD')


def create_app():
    """
    return app instance
    :return: app instance
    """
    app = Flask(__name__)
    if DB_USERNAME is not None and DB_PASSWORD is not None:
        app.config['MONGODB_SETTINGS'] = {
            'db': DB_NAME,
            'host': DB_HOST,
            'port': int(DB_PORT),
            'username': DB_USERNAME,
            'password': DB_PASSWORD
        }
    else:
        app.config['MONGODB_SETTINGS'] = {
            'db': DB_NAME,
            'host': DB_HOST,
            'port': int(DB_PORT)
        }

    db.init_app(app)

    return app
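Note that int(DB_PORT) raises a TypeError when HAP_DB_PORT is unset, so the .env file has to be sourced before the app starts. A defensive variant (a sketch, not what this commit does) would fall back to the defaults that .env exports:

DB_NAME = os.environ.get('HAP_DB_NAME', 'hap')
DB_HOST = os.environ.get('HAP_DB_HOST', 'localhost')
DB_PORT = int(os.environ.get('HAP_DB_PORT', 27017))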
24  controller/__init__.py  Normal file
@@ -0,0 +1,24 @@
from flask_restplus import Api
from flask import Blueprint
from controller.prediction_controller import api as predict_apis
from controller.feedback_controller import api as feedback_apis
from controller.general_controller import api as general_apis


# old api, will be discarded later
api_bp_old = Blueprint('api_old', __name__)

api = Api(api_bp_old)

api.add_namespace(predict_apis)
api.add_namespace(general_apis)

"""
new api with /api/ path
"""
api_bp_new = Blueprint('api_new', __name__)

api_new = Api(api_bp_new)
api_new.add_namespace(general_apis)
api_new.add_namespace(predict_apis)
api_new.add_namespace(feedback_apis)
32  controller/feedback_controller.py  Normal file
@@ -0,0 +1,32 @@
from flask import request
from extension.utilServices import send_json_response
from flask_restplus import Namespace, Resource
import datetime
api = Namespace('feedback', path='/feedback', description='feedback related operations')


@api.route('')
class Feedback(Resource):
    @api.doc('feedback related api')
    def post(self):
        """
        store user feedbacks to the database
        :return: operation results
        """
        from models.feedback import Feedback, FeedbackContent

        message = request.get_json(force=True)

        try:
            new_feedback = Feedback()
            new_feedback_content = []
            for each_content in message['data']:
                feedback_content = FeedbackContent(**each_content)
                new_feedback_content.append(feedback_content)

            new_feedback.content = new_feedback_content
            new_feedback.date = datetime.datetime.now()
            new_feedback.save()
            return send_json_response({}, 200)
        except Exception as error:
            return send_json_response({'msg': str(error)}, 500)
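The payload shape is implied by the loop above and by the fields that general_controller later reads back; the FeedbackContent schema itself lives in models/feedback.py, which is not part of this commit. A plausible request, assuming a locally running server:

import requests

payload = {
    "data": [{
        "type": "Dog",               # 'Cat' or 'Dog'
        "breedCorrectness": False,   # the breed prediction was wrong
        "breedFeedback": "Beagle",   # breed the user suggests instead
        "emotionCorrectness": True,
        "emotionFeedback": "Happy"
    }]
}
requests.post("http://localhost:5000/api/feedback", json=payload)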
190  controller/general_controller.py  Normal file
@@ -0,0 +1,190 @@
from extension.utilServices import (send_json_response)
from extension.constants import CONSTANTS
import csv
import re
from flask_restplus import Namespace, Resource
import datetime
import copy

api = Namespace('general', path='/', description='general information related to app')


# @api.route('/get-breed-info')
# class BreedList(Resource):
#     @api.doc('get breed information from csv')
#     def get(self):
#         """
#         return the breed information, which gets from wikipedia
#         :return: list of breed information
#         """
#         breed_info = {}
#         for animal_type in CONSTANTS['ANIMAL_TYPE']:
#             with open('wikiFile/' + animal_type + '.csv') as csv_file:
#                 csv_reader = csv.reader(csv_file, delimiter=',')
#                 line_count = 0
#                 json_title = []
#                 animal_list = []
#                 for row in csv_reader:
#                     if line_count == 0:
#                         json_title = row
#                         line_count += 1
#                     else:
#                         info_obj = {
#                             json_title[0]: row[0],
#                             json_title[1]: row[1],
#                             json_title[2]: row[2],
#                             json_title[3]: row[3],
#                             json_title[4]: row[4],
#                         }
#                         animal_list.append(info_obj)
#                         line_count += 1
#                 csv_file.close()
#             breed_info[animal_type] = animal_list
#
#         return send_json_response(breed_info, 200)


@api.route('/get-app-info')
class AppInfo(Resource):
    @api.doc('return the app developer information')
    def get(self):
        """
        return the footer information
        :return: return the app related information
        """
        app_info = {
            'developedBy': 'This app was developed by the Melbourne eResearch Group (www.eresearch.unimelb.edu.au) within the School of Computing and Information Systems (https://cis.unimelb.edu.au) at The University of Melbourne (www.unimelb.edu.au). ',
            'description': 'The app uses artificial intelligence (convolutional neural networks) that have been trained on dog/cat images to identify whether a dog/cat is in an image, and if so the species type (breed) and its emotion.',
            'contact': 'https://eresearch.unimelb.edu.au',
            'developedByHTML': '<p>This app was developed by the Melbourne eResearch Group (<a href="www.eresearch.unimelb.edu.au" target="_blank">www.eresearch.unimelb.edu.au</a>) within the School of Computing and Information Systems (<a href="https://cis.unimelb.edu.au" target="_blank">https://cis.unimelb.edu.au</a>) at The University of Melbourne (<a href="www.unimelb.edu.au" target="_blank">www.unimelb.edu.au</a>).</p>',
            'descriptionHTML': '<p>The app uses artificial intelligence (convolutional neural networks) that have been trained on dog/cat images to identify whether a dog/cat is in an image, and if so the species type (breed) and its emotion.</p>',
            'contactHTML': '<p>Please contact us at: <a href="eresearch.unimelb.edu.au" target="_blank">eresearch.unimelb.edu.au</a></p>'
        }

        return send_json_response(app_info, 200)


@api.route('/get-statistical-results')
class StatisticalData(Resource):
    @api.doc('return statistical data')
    def get(self):
        """
        return the statistical information
        query across feedback and prediction model
        :return: return statistical information
        """
        from models.prediction import Prediction
        from models.feedback import Feedback

        total_cat_breed = []
        total_dog_breed = []
        prediction_collection = Prediction._get_collection()
        feedback_collection = Feedback._get_collection()
        statistical_data = copy.deepcopy(CONSTANTS['STATISTICAL_DATA'])

        # initialise statistical_data per breed, in alphabetical order
        f = open("model_data/pet_classes.txt", "r")
        for each_line in f:
            first_char = each_line[0]
            if first_char.isupper():
                total_cat_breed.append(re.sub(r"_", " ", each_line.rstrip(), flags=re.IGNORECASE).title())
            else:
                total_dog_breed.append(re.sub(r"_", " ", each_line.rstrip(), flags=re.IGNORECASE).title())
        f.close()
        total_cat_breed.sort()
        total_dog_breed.sort()

        for each_breed in total_cat_breed:
            statistical_data['Cat']['prediction_data']['breed'][each_breed] = 0
            statistical_data['Cat']['feedback_data']['breed']['wrong'][each_breed] = 0
            statistical_data['Cat']['feedback_data']['breed']['correct'][each_breed] = 0

        for each_breed in total_dog_breed:
            statistical_data['Dog']['prediction_data']['breed'][each_breed] = 0
            statistical_data['Dog']['feedback_data']['breed']['wrong'][each_breed] = 0
            statistical_data['Dog']['feedback_data']['breed']['correct'][each_breed] = 0

        # photo by date
        today = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
        total_number_of_photo_within_one_week = {
            (today - datetime.timedelta(days=6)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today - datetime.timedelta(days=5), '$gte': today - datetime.timedelta(days=6)}}),
            (today - datetime.timedelta(days=5)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today - datetime.timedelta(days=4), '$gte': today - datetime.timedelta(days=5)}}),
            (today - datetime.timedelta(days=4)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today - datetime.timedelta(days=3), '$gte': today - datetime.timedelta(days=4)}}),
            (today - datetime.timedelta(days=3)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today - datetime.timedelta(days=2), '$gte': today - datetime.timedelta(days=3)}}),
            (today - datetime.timedelta(days=2)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today - datetime.timedelta(days=1), '$gte': today - datetime.timedelta(days=2)}}),
            (today - datetime.timedelta(days=1)).strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$lt': today, '$gte': today - datetime.timedelta(days=1)}}),
            today.strftime('%d/%m/%Y'): prediction_collection.count_documents(
                {'date': {'$gte': today}}),
        }

        # prediction
        total_prediction = prediction_collection.find({})
        for each_prediction in total_prediction:
            prediction_results = each_prediction['predictionResults']
            for each_result in prediction_results:
                statistical_data[each_result['type']]['prediction_number'] = statistical_data[each_result['type']].get(
                    'prediction_number', 0) + 1
                statistical_data[each_result['type']]['prediction_data']['breed'][each_result['breed']] = \
                    statistical_data[each_result['type']]['prediction_data']['breed'].get(each_result['breed'], 0) + 1
                statistical_data[each_result['type']]['prediction_data']['emotion'][each_result['emotion']] = \
                    statistical_data[each_result['type']]['prediction_data']['emotion'].get(each_result['emotion'], 0) + 1

        # feedback
        total_feedback = feedback_collection.find({})

        for each_feedback in total_feedback:
            feedback_content = each_feedback['content']
            for each_content in feedback_content:
                statistical_data[each_content['type']]['feedback_number'] = statistical_data[each_content['type']].get(
                    'feedback_number', 0) + 1

                if not each_content['breedCorrectness']:
                    statistical_data[each_content['type']]['feedback_data']['breed']['wrong'][each_content['breedFeedback']] = \
                        statistical_data[each_content['type']]['feedback_data']['breed']['wrong'].get(each_content['breedFeedback'], 0) + 1
                else:
                    statistical_data[each_content['type']]['feedback_data']['breed']['correct'][each_content['breedFeedback']] = \
                        statistical_data[each_content['type']]['feedback_data']['breed']['correct'].get(each_content['breedFeedback'], 0) + 1

                if not each_content['emotionCorrectness']:
                    statistical_data[each_content['type']]['feedback_data']['emotion']['wrong'][each_content['emotionFeedback']] = \
                        statistical_data[each_content['type']]['feedback_data']['emotion']['wrong'].get(each_content['emotionFeedback'], 0) + 1
                else:
                    statistical_data[each_content['type']]['feedback_data']['emotion']['correct'][each_content['emotionFeedback']] = \
                        statistical_data[each_content['type']]['feedback_data']['emotion']['correct'].get(each_content['emotionFeedback'], 0) + 1

        result = {
            'totalNumberOfPhotoUploaded': prediction_collection.count_documents({}),
            'totalNumberOfCatPrediction': statistical_data['Cat']['prediction_number'],
            'totalNumberOfDogPrediction': statistical_data['Dog']['prediction_number'],
            'totalNumberOfCatFeedback': statistical_data['Cat']['feedback_number'],
            'totalNumberOfDogFeedback': statistical_data['Dog']['feedback_number'],
            'totalNumberOfDogBreedPrediction': statistical_data['Dog']['prediction_data']['breed'],
            'totalNumberOfDogBreedCorrectFeedback': statistical_data['Dog']['feedback_data']['breed']['correct'],
            'totalNumberOfDogBreedWrongFeedback': statistical_data['Dog']['feedback_data']['breed']['wrong'],
            'totalNumberOfDogEmotionPrediction': statistical_data['Dog']['prediction_data']['emotion'],
            'totalNumberOfDogEmotionCorrectFeedback': statistical_data['Dog']['feedback_data']['emotion']['correct'],
            'totalNumberOfDogEmotionWrongFeedback': statistical_data['Dog']['feedback_data']['emotion']['wrong'],
            'totalNumberOfCatBreedPrediction': statistical_data['Cat']['prediction_data']['breed'],
            'totalNumberOfCatBreedCorrectFeedback': statistical_data['Cat']['feedback_data']['breed']['correct'],
            'totalNumberOfCatBreedWrongFeedback': statistical_data['Cat']['feedback_data']['breed']['wrong'],
            'totalNumberOfCatEmotionPrediction': statistical_data['Cat']['prediction_data']['emotion'],
            'totalNumberOfCatEmotionCorrectFeedback': statistical_data['Cat']['feedback_data']['emotion']['correct'],
            'totalNumberOfCatEmotionWrongFeedback': statistical_data['Cat']['feedback_data']['emotion']['wrong'],
            'numberOfPhotoByDate': total_number_of_photo_within_one_week
        }

        return send_json_response(result, 200)
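The seven near-identical count_documents calls that build total_number_of_photo_within_one_week could be collapsed into a loop; a behaviour-equivalent sketch:

total_number_of_photo_within_one_week = {}
for offset in range(6, -1, -1):
    day = today - datetime.timedelta(days=offset)
    if offset == 0:
        query = {'date': {'$gte': day}}  # today's bucket is open-ended, as above
    else:
        query = {'date': {'$gte': day, '$lt': day + datetime.timedelta(days=1)}}
    total_number_of_photo_within_one_week[day.strftime('%d/%m/%Y')] = \
        prediction_collection.count_documents(query)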
73  controller/prediction_controller.py  Normal file
@@ -0,0 +1,73 @@
from flask import request
import base64
from PIL import Image
import io
import datetime
from extension.utilServices import send_json_response
from flask_restplus import Namespace, Resource


api = Namespace('predict', path='/predict', description='prediction related operations')


@api.route('')
class Prediction(Resource):
    @api.doc('make prediction')
    def post(self):
        """
        return prediction results and save them to the database
        :return: prediction results
        """
        from models.prediction import Prediction
        from config import deepface
        message = request.get_json(force=True)
        encoded = message['image']
        decoded = base64.b64decode(encoded)
        image = Image.open(io.BytesIO(decoded)).convert('RGB')

        img, detections = deepface.analyze(image)

        # TODO: handle outputs
        # encode image and jsonify detections
        buffered = io.BytesIO()
        img.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue())
        base64_string = img_str.decode('utf-8')

        result = {
            'img_str': base64_string,
            'results': detections,
            'message': '',
            'status': 'success'
        }

        if len(detections) == 0:
            result['message'] = "We’re not very sure of what this may be, could you try with another image?"
            result['status'] = 'failure'
        elif len(detections) == 1:
            result['isShowId'] = 'false'

        if len(detections) > 0:
            formatted_prediction_results = []
            for each in detections:
                age = each['age']
                gender = each['gender']
                emotion = each['emotion']['dominant']
                emotion_score = each['emotion']['dominant_score']
                formatted_prediction_results.append({
                    'age': age,
                    'gender': gender,
                    'emotion': emotion,
                    'emotionScore': emotion_score
                })

        # store to db?

        # new_prediction = Prediction(**{
        #     'predictionResults': formatted_prediction_results,
        #     'rawPredictionResults': detections,
        #     'date': datetime.datetime.now(),
        # })
        # new_prediction.save()

        return send_json_response(result, 200)
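The endpoint expects a JSON body holding a base64-encoded image and returns the annotated image plus per-face attributes. A client sketch, assuming a local server, a hypothetical image file face.jpg, and that send_json_response serializes the result dict directly:

import base64
import requests

with open("face.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post("http://localhost:5000/api/predict", json={"image": encoded})
body = resp.json()
print(body["status"], body["results"])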
101  deepface/DeepFaceLite.py  Normal file
@@ -0,0 +1,101 @@
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
import time
import os
from os import path
from pathlib import Path
import gdown
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import cv2
from keras import backend as K
import keras
import tensorflow as tf
import pickle

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functionsLite, realtime, distance as dst


class DeepFaceLite(object):

    def __init__(self):

        functionsLite.initializeFolder()

        # init models
        self.detector_backend = 'mtcnn'
        self.emotion_model = Emotion.loadModel()
        self.age_model = Age.loadModel()
        self.gender_model = Gender.loadModel()

        # TODO: init detector

    def analyze(self, img, enforce_detection=True, detector_backend='opencv'):

        # preprocess images
        processed = functionsLite.preprocess_face(img, enforce_detection=enforce_detection, detector_backend=detector_backend)
        imgs_224 = processed['processed']
        emotion_imgs = processed['gray']
        bbox_img = processed['bbox']
        # original_faces = processed['original']

        resp_objects = []

        # iterate through faces
        for i in range(len(imgs_224)):

            resp_obj = {}

            # --- emotion ---
            emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

            emotion_predictions = self.emotion_model.predict(emotion_imgs[i])[0, :]

            sum_of_predictions = emotion_predictions.sum()

            all_emotions = {}

            # use a separate index here so the face index i is not shadowed
            for j in range(0, len(emotion_labels)):
                emotion_label = emotion_labels[j]
                emotion_prediction = 100 * emotion_predictions[j] / sum_of_predictions
                all_emotions[emotion_label] = emotion_prediction

            emotion = {
                'all': all_emotions,
                'dominant': emotion_labels[np.argmax(emotion_predictions)],
                'dominant_score': np.max(emotion_predictions)
            }

            # --- age ---
            age_predictions = self.age_model.predict(imgs_224[i])[0, :]
            apparent_age = Age.findApparentAge(age_predictions)

            # --- gender ---
            gender_prediction = self.gender_model.predict(imgs_224[i])[0, :]

            if np.argmax(gender_prediction) == 0:
                gender = "Woman"
            elif np.argmax(gender_prediction) == 1:
                gender = "Man"

            # resp_obj = json.loads(resp_obj)

            resp_obj = {
                'age': apparent_age,
                'gender': gender,
                'emotion': emotion
            }

            resp_objects.append(resp_obj)

        return bbox_img, resp_objects
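Usage mirrors the notebook further down in this commit; a minimal sketch, assuming a local image file face.jpg:

from PIL import Image
from deepface.DeepFaceLite import DeepFaceLite

deepface = DeepFaceLite()

# analyze() takes a PIL image and returns the image with boxes drawn plus one dict per face
img, detections = deepface.analyze(Image.open("face.jpg"), detector_backend='mtcnn')
for d in detections:
    print(d['age'], d['gender'], d['emotion']['dominant'])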
769  deepface/commons/functionsLite.py  Normal file
@@ -0,0 +1,769 @@
import os
import numpy as np
import pandas as pd
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
import cv2
from pathlib import Path
import gdown
import hashlib
import math
from PIL import Image
import copy
import base64
import multiprocessing
import subprocess
import tensorflow as tf
import keras
import bz2
from deepface.commons import distance
from mtcnn import MTCNN  # 0.1.0

# draw bounding boxes
from PIL import Image, ImageFont, ImageDraw
import colorsys


def loadBase64Img(uri):
    encoded_data = uri.split(',')[1]
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)  # decode raw bytes into a uint8 buffer
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img


def initializeFolder():
    home = str(Path.home())

    if not os.path.exists(home + "/.deepface"):
        os.mkdir(home + "/.deepface")
        print("Directory ", home, "/.deepface created")

    if not os.path.exists(home + "/.deepface/weights"):
        os.mkdir(home + "/.deepface/weights")
        print("Directory ", home, "/.deepface/weights created")


def findThreshold(model_name, distance_metric):
    threshold = 0.40

    if model_name == 'VGG-Face':
        if distance_metric == 'cosine':
            threshold = 0.40
        elif distance_metric == 'euclidean':
            threshold = 0.55
        elif distance_metric == 'euclidean_l2':
            threshold = 0.75

    elif model_name == 'OpenFace':
        if distance_metric == 'cosine':
            threshold = 0.10
        elif distance_metric == 'euclidean':
            threshold = 0.55
        elif distance_metric == 'euclidean_l2':
            threshold = 0.55

    elif model_name == 'Facenet':
        if distance_metric == 'cosine':
            threshold = 0.40
        elif distance_metric == 'euclidean':
            threshold = 10
        elif distance_metric == 'euclidean_l2':
            threshold = 0.80

    elif model_name == 'DeepFace':
        if distance_metric == 'cosine':
            threshold = 0.23
        elif distance_metric == 'euclidean':
            threshold = 64
        elif distance_metric == 'euclidean_l2':
            threshold = 0.64

    elif model_name == 'DeepID':
        if distance_metric == 'cosine':
            threshold = 0.015
        elif distance_metric == 'euclidean':
            threshold = 45
        elif distance_metric == 'euclidean_l2':
            threshold = 0.17

    elif model_name == 'Dlib':
        if distance_metric == 'cosine':
            threshold = 0.07
        elif distance_metric == 'euclidean':
            threshold = 0.60
        elif distance_metric == 'euclidean_l2':
            threshold = 0.60

    return threshold


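The chain of conditionals is a pure lookup; an equivalent table-driven sketch with the same values and the same 0.40 default:

THRESHOLDS = {
    'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75},
    'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55},
    'Facenet':  {'cosine': 0.40, 'euclidean': 10,   'euclidean_l2': 0.80},
    'DeepFace': {'cosine': 0.23, 'euclidean': 64,   'euclidean_l2': 0.64},
    'DeepID':   {'cosine': 0.015, 'euclidean': 45,  'euclidean_l2': 0.17},
    'Dlib':     {'cosine': 0.07, 'euclidean': 0.60, 'euclidean_l2': 0.60},
}

def findThreshold(model_name, distance_metric):
    return THRESHOLDS.get(model_name, {}).get(distance_metric, 0.40)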
def get_opencv_path():
    opencv_home = cv2.__file__
    folders = opencv_home.split(os.path.sep)[0:-1]

    path = folders[0]
    for folder in folders[1:]:
        path = path + "/" + folder

    return path + "/data/"


def load_image(img):
    exact_image = False
    if type(img).__module__ == np.__name__:
        exact_image = True

    base64_img = False
    if len(img) > 11 and img[0:11] == "data:image/":
        base64_img = True

    # ---------------------------

    if base64_img == True:
        img = loadBase64Img(img)

    elif exact_image != True:  # image path passed as input
        if os.path.isfile(img) != True:
            raise ValueError("Confirm that ", img, " exists")

        img = cv2.imread(img)

    return img


def detect_face(img, detector_backend='opencv', enforce_detection=True):
    home = str(Path.home())

    # drawing settings
    font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                              size=np.floor(3e-2 * img.size[1] + 0.5).astype('int32'))
    thickness = (img.size[0] + img.size[1]) // 300
    hsv_tuples = [(x / 50, 1., 1.)
                  for x in range(50)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    np.random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.

    # keep PIL image and cv2 image
    pil_img = img
    cv2_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    if detector_backend == 'opencv':

        # get opencv configuration up first
        opencv_path = get_opencv_path()
        face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"

        if os.path.isfile(face_detector_path) != True:
            raise ValueError("Confirm that opencv is installed on your environment! Expected path ", face_detector_path,
                             " violated.")

        face_detector = cv2.CascadeClassifier(face_detector_path)

        # --------------------------

        faces = []

        try:
            faces = face_detector.detectMultiScale(cv2_img, 1.3, 5)
        except:
            pass

        if len(faces) > 0:
            detected_faces = []

            showid = True if len(faces) > 1 else False

            for i, face in enumerate(faces):
                x, y, w, h = face
                detected_face = cv2_img[int(y):int(y + h), int(x):int(x + w)]
                detected_faces.append(detected_face)

                # bounding box corners
                top, left, bottom, right = [int(y), int(x), int(y + h), int(x + w)]
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(pil_img.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(pil_img.size[0], np.floor(right + 0.5).astype('int32'))

                # label
                label = 'ID: {}'.format(i)
                draw = ImageDraw.Draw(pil_img)
                label_size = draw.textsize(label, font=font)

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle(
                        [left + i, top + i, right - i, bottom - i],
                        outline=colors[i])

                if showid:
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=colors[i]
                    )
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

            return pil_img, detected_faces

        else:  # if no face detected

            if enforce_detection != True:
                return pil_img, cv2_img

            else:
                raise ValueError(
                    "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")

    elif detector_backend == 'ssd':

        # ---------------------------
        # check required ssd model exists in the home/.deepface/weights folder

        # model structure
        if os.path.isfile(home + '/.deepface/weights/deploy.prototxt') != True:
            print("deploy.prototxt will be downloaded...")

            url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"

            output = home + '/.deepface/weights/deploy.prototxt'

            gdown.download(url, output, quiet=False)

        # pre-trained weights
        if os.path.isfile(home + '/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel') != True:
            print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")

            url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"

            output = home + '/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel'

            gdown.download(url, output, quiet=False)

        # ---------------------------

        ssd_detector = cv2.dnn.readNetFromCaffe(
            home + "/.deepface/weights/deploy.prototxt",
            home + "/.deepface/weights/res10_300x300_ssd_iter_140000.caffemodel"
        )

        ssd_labels = ["img_id", "is_face", "confidence", "left", "top", "right", "bottom"]

        target_size = (300, 300)

        base_img = cv2_img.copy()  # we will restore base_img to img later

        original_size = cv2_img.shape

        cv2_img = cv2.resize(cv2_img, target_size)

        aspect_ratio_x = (original_size[1] / target_size[1])
        aspect_ratio_y = (original_size[0] / target_size[0])

        imageBlob = cv2.dnn.blobFromImage(image=cv2_img)

        ssd_detector.setInput(imageBlob)
        detections = ssd_detector.forward()

        detections_df = pd.DataFrame(detections[0][0], columns=ssd_labels)

        detections_df = detections_df[detections_df['is_face'] == 1]  # 0: background, 1: face
        detections_df = detections_df[detections_df['confidence'] >= 0.90]

        detections_df['left'] = (detections_df['left'] * 300).astype(int)
        detections_df['bottom'] = (detections_df['bottom'] * 300).astype(int)
        detections_df['right'] = (detections_df['right'] * 300).astype(int)
        detections_df['top'] = (detections_df['top'] * 300).astype(int)

        if detections_df.shape[0] > 0:

            showid = True if detections_df.shape[0] > 1 else False

            # TODO: sort detections_df

            detected_faces = []

            for i in range(0, len(detections_df)):
                instance = detections_df.iloc[i]
                left = instance["left"]
                right = instance["right"]
                bottom = instance["bottom"]
                top = instance["top"]

                detected_face = base_img[int(top * aspect_ratio_y):int(bottom * aspect_ratio_y), int(left * aspect_ratio_x):int(right * aspect_ratio_x)]
                detected_faces.append(detected_face)

                # bounding box corners
                top, left, bottom, right = [int(top * aspect_ratio_y), int(left * aspect_ratio_x), int(bottom * aspect_ratio_y), int(right * aspect_ratio_x)]
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(pil_img.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(pil_img.size[0], np.floor(right + 0.5).astype('int32'))

                # label
                label = 'ID: {}'.format(i)
                draw = ImageDraw.Draw(pil_img)
                label_size = draw.textsize(label, font=font)

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle(
                        [left + i, top + i, right - i, bottom - i],
                        outline=colors[i])

                if showid:
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=colors[i]
                    )
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

            return pil_img, detected_faces

        else:  # if no face detected

            if enforce_detection != True:
                img = base_img.copy()
                return pil_img, img

            else:
                raise ValueError(
                    "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")

    elif detector_backend == 'dlib':
        import dlib  # this is not a must library within deepface. that's why, I didn't put this import to a global level. version: 19.20.0

        detector = dlib.get_frontal_face_detector()

        detections = detector(cv2_img, 1)

        if len(detections) > 0:

            showid = True if len(detections) > 1 else False

            detected_faces = []
            for i, d in enumerate(detections):

                left = d.left()
                right = d.right()
                top = d.top()
                bottom = d.bottom()

                detected_face = cv2_img[top:bottom, left:right]
                detected_faces.append(detected_face)

                # bounding box corners
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(pil_img.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(pil_img.size[0], np.floor(right + 0.5).astype('int32'))

                # label
                label = 'ID: {}'.format(i)
                draw = ImageDraw.Draw(pil_img)
                label_size = draw.textsize(label, font=font)

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle(
                        [left + i, top + i, right - i, bottom - i],
                        outline=colors[i])

                if showid:
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=colors[i]
                    )
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

            return pil_img, detected_faces

        else:  # if no face detected

            if enforce_detection != True:
                return pil_img, img

            else:
                raise ValueError(
                    "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")

    elif detector_backend == 'mtcnn':

        mtcnn_detector = MTCNN()

        detections = mtcnn_detector.detect_faces(cv2_img)

        if len(detections) > 0:

            showid = True if len(detections) > 1 else False

            detected_faces = []
            for i, detection in enumerate(detections):
                x, y, w, h = detection["box"]
                detected_face = cv2_img[int(y):int(y + h), int(x):int(x + w)]
                detected_faces.append(detected_face)

                # bounding box corners
                top, left, bottom, right = [int(y), int(x), int(y + h), int(x + w)]
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(pil_img.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(pil_img.size[0], np.floor(right + 0.5).astype('int32'))

                # label
                label = 'ID: {}'.format(i)
                draw = ImageDraw.Draw(pil_img)
                label_size = draw.textsize(label, font=font)

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle(
                        [left + i, top + i, right - i, bottom - i],
                        outline=colors[i])

                if showid:
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=colors[i]
                    )
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

            return pil_img, detected_faces

        else:  # if no face detected
            if enforce_detection != True:
                return pil_img, img

            else:
                raise ValueError(
                    "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")

    else:
        detectors = ['opencv', 'ssd', 'dlib', 'mtcnn']
        raise ValueError("Valid backends are ", detectors, " but you passed ", detector_backend)

    return 0


def alignment_procedure(img, left_eye, right_eye):
    # this function aligns the given face in img based on left and right eye coordinates

    left_eye_x, left_eye_y = left_eye
    right_eye_x, right_eye_y = right_eye

    # -----------------------
    # find rotation direction

    if left_eye_y > right_eye_y:
        point_3rd = (right_eye_x, left_eye_y)
        direction = -1  # rotate clockwise
    else:
        point_3rd = (left_eye_x, right_eye_y)
        direction = 1  # rotate counter-clockwise

    # -----------------------
    # find length of triangle edges

    a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd))
    b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd))
    c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye))

    # -----------------------

    # apply cosine rule

    if b != 0 and c != 0:  # the multiplication below would cause division by zero otherwise

        cos_a = (b * b + c * c - a * a) / (2 * b * c)
        angle = np.arccos(cos_a)  # angle in radians
        angle = (angle * 180) / math.pi  # radians to degrees

        # -----------------------
        # rotate base image

        if direction == -1:
            angle = 90 - angle

        img = Image.fromarray(img)
        img = np.array(img.rotate(direction * angle))

    # -----------------------

    return img  # return img anyway


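A quick numeric check of the geometry above, with hypothetical eye centres; the rotation the function computes equals the tilt of the line through the eyes:

import numpy as np

# hypothetical eye centres (x, y); the right eye sits higher than the left
left_eye, right_eye = (30, 40), (70, 30)
point_3rd = (70, 40)                   # (right_eye_x, left_eye_y), so direction = -1
a, b = 40.0, 10.0                      # legs of the right triangle
c = np.sqrt(a ** 2 + b ** 2)           # hypotenuse: the line through both eyes
cos_a = (b * b + c * c - a * a) / (2 * b * c)
angle = np.degrees(np.arccos(cos_a))   # ~75.96
print(90 - angle)                      # ~14.04, the same as atan(10/40): the eye-line tilt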
def align_face(img, detector_backend='opencv'):
    home = str(Path.home())

    if (detector_backend == 'opencv') or (detector_backend == 'ssd'):

        opencv_path = get_opencv_path()
        eye_detector_path = opencv_path + "haarcascade_eye.xml"
        eye_detector = cv2.CascadeClassifier(eye_detector_path)

        detected_face_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # eye detector expects a grayscale image

        eyes = eye_detector.detectMultiScale(detected_face_gray)

        if len(eyes) >= 2:

            # find the largest 2 eyes

            base_eyes = eyes[:, 2]

            items = []
            for i in range(0, len(base_eyes)):
                item = (base_eyes[i], i)
                items.append(item)

            df = pd.DataFrame(items, columns=["length", "idx"]).sort_values(by=['length'], ascending=False)

            eyes = eyes[df.idx.values[0:2]]  # eyes variable stores the largest 2 eyes

            # -----------------------
            # decide left and right eye

            eye_1 = eyes[0]
            eye_2 = eyes[1]

            if eye_1[0] < eye_2[0]:
                left_eye = eye_1
                right_eye = eye_2
            else:
                left_eye = eye_2
                right_eye = eye_1

            # -----------------------
            # find center of eyes

            left_eye = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
            right_eye = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))

            img = alignment_procedure(img, left_eye, right_eye)

        return img  # return img anyway

    elif detector_backend == 'dlib':

        # check required file exists in the home/.deepface/weights folder

        if os.path.isfile(home + '/.deepface/weights/shape_predictor_5_face_landmarks.dat') != True:
            print("shape_predictor_5_face_landmarks.dat.bz2 is going to be downloaded")

            url = "http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
            output = home + '/.deepface/weights/' + url.split("/")[-1]

            gdown.download(url, output, quiet=False)

            zipfile = bz2.BZ2File(output)
            data = zipfile.read()
            newfilepath = output[:-4]  # discard .bz2 extension
            open(newfilepath, 'wb').write(data)

        # ------------------------------

        import dlib  # this is not a must dependency in deepface

        detector = dlib.get_frontal_face_detector()
        sp = dlib.shape_predictor(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat")

        detections = detector(img, 1)

        if len(detections) > 0:
            detected_face = detections[0]
            img_shape = sp(img, detected_face)
            img = dlib.get_face_chip(img, img_shape, size=img.shape[0])

        return img  # return img anyway

    elif detector_backend == 'mtcnn':

        mtcnn_detector = MTCNN()
        detections = mtcnn_detector.detect_faces(img)

        if len(detections) > 0:
            detection = detections[0]

            keypoints = detection["keypoints"]
            left_eye = keypoints["left_eye"]
            right_eye = keypoints["right_eye"]

            img = alignment_procedure(img, left_eye, right_eye)

        return img  # return img anyway


def preprocess_face(base_img, enforce_detection=True, detector_backend='opencv'):

    cv2_img = cv2.cvtColor(np.asarray(base_img), cv2.COLOR_RGB2BGR)
    bbox_img, faces = detect_face(img=base_img, detector_backend=detector_backend,
                                  enforce_detection=enforce_detection)
    orig_faces = faces.copy()

    # --------------------------

    for i in range(len(faces)):

        face = faces[i]

        if face.shape[0] > 0 and face.shape[1] > 0:
            faces[i] = align_face(img=face, detector_backend=detector_backend)
        else:

            if enforce_detection == True:
                raise ValueError("Detected face shape is ", face.shape,
                                 ". Consider to set enforce_detection argument to False.")
            else:  # restore base image
                faces[i] = cv2_img

    # --------------------------

    # post-processing
    pixels = []
    pixels_gray = []

    for img in faces:
        # RGB
        target_size = (224, 224)
        img_rgb = cv2.resize(img, target_size)
        img_pixels = image.img_to_array(img_rgb)
        img_pixels = np.expand_dims(img_pixels, axis=0)
        img_pixels /= 255  # normalize input into [0, 1]
        pixels.append(img_pixels)

        # gray scale
        target_size = (48, 48)
        img_gray = cv2.resize(img, target_size)
        img_gray = cv2.cvtColor(img_gray, cv2.COLOR_BGR2GRAY)
        img_pixels_gray = image.img_to_array(img_gray)
        img_pixels_gray = np.expand_dims(img_pixels_gray, axis=0)
        img_pixels_gray /= 255  # normalize input into [0, 1]
        pixels_gray.append(img_pixels_gray)

    return {'processed': pixels, 'original': orig_faces,
            'gray': pixels_gray, 'bbox': bbox_img}


def allocateMemory():
    # find allocated memories
    gpu_indexes = []
    memory_usage_percentages = []
    available_memories = []
    total_memories = []
    utilizations = []
    power_usages = []
    power_capacities = []

    try:
        result = subprocess.check_output(['nvidia-smi'])

        dashboard = result.decode("utf-8").split("=|")

        dashboard = dashboard[1].split("\n")

        gpu_idx = 0
        for line in dashboard:
            if ("MiB" in line):
                power_info = line.split("|")[1]
                power_capacity = int(power_info.split("/")[-1].replace("W", ""))
                power_usage = int((power_info.split("/")[-2]).strip().split(" ")[-1].replace("W", ""))

                power_usages.append(power_usage)
                power_capacities.append(power_capacity)

                # ----------------------------

                memory_info = line.split("|")[2].replace("MiB", "").split("/")
                utilization_info = int(line.split("|")[3].split("%")[0])

                allocated = int(memory_info[0])
                total_memory = int(memory_info[1])
                available_memory = total_memory - allocated

                total_memories.append(total_memory)
                available_memories.append(available_memory)
                memory_usage_percentages.append(round(100 * int(allocated) / int(total_memory), 4))
                utilizations.append(utilization_info)
                gpu_indexes.append(gpu_idx)

                gpu_idx = gpu_idx + 1

        gpu_count = gpu_idx * 1

    except Exception as err:
        gpu_count = 0
        # print(str(err))

    # ------------------------------

    df = pd.DataFrame(gpu_indexes, columns=["gpu_index"])
    df["total_memories_in_mb"] = total_memories
    df["available_memories_in_mb"] = available_memories
    df["memory_usage_percentage"] = memory_usage_percentages
    df["utilizations"] = utilizations
    df["power_usages_in_watts"] = power_usages
    df["power_capacities_in_watts"] = power_capacities

    df = df.sort_values(by=["available_memories_in_mb"], ascending=False).reset_index(drop=True)

    # ------------------------------

    required_memory = 10000  # All deepface models require 9016 MiB

    if df.shape[0] > 0:  # has gpu
        if df.iloc[0].available_memories_in_mb > required_memory:
            my_gpu = str(int(df.iloc[0].gpu_index))
            os.environ["CUDA_VISIBLE_DEVICES"] = my_gpu

            # ------------------------------
            # tf allocates all memory by default
            # this block avoids that greedy approach

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            session = tf.Session(config=config)
            keras.backend.set_session(session)

            print("DeepFace will run on GPU (gpu_", my_gpu, ")")
        else:
            # has a gpu, but not enough free memory to allocate
            os.environ["CUDA_VISIBLE_DEVICES"] = ""  # run it on cpu
            print("Even though the system has GPUs, there is not enough memory to allocate.")
            print("DeepFace will run on CPU")
    else:
        print("DeepFace will run on CPU")
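tf.ConfigProto and tf.Session are TensorFlow 1.x APIs. Under TensorFlow 2.x the same allow-growth behaviour would be requested with the following sketch (an assumption on my part, for TF >= 2.1; it is not what this commit uses):

import tensorflow as tf

for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)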
143  deepface_lite.ipynb  Normal file
@ -0,0 +1,143 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"metadata": {
|
||||||
|
"collapsed": true,
|
||||||
|
"pycharm": {
|
||||||
|
"name": "#%%\n"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from deepface.DeepFaceLite import DeepFaceLite\n",
|
||||||
|
"import os\n",
|
||||||
|
"import cv2\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from PIL import Image"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"output_type": "error",
|
||||||
|
"ename": "FileNotFoundError",
|
||||||
|
"evalue": "[Errno 2] No such file or directory: 'test_imgs/'",
|
||||||
|
"traceback": [
|
||||||
|
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||||
|
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
|
||||||
|
"\u001b[0;32m<ipython-input-2-cd20a3f3900f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mimgs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mimg_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'test_imgs/'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mimg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_dir\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mendswith\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'jpg'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mendswith\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'jpeg'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mimg_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_dir\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||||||
|
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'test_imgs/'"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# read images\n",
|
||||||
|
"imgs = []\n",
|
||||||
|
"img_dir = 'test_imgs/'\n",
|
||||||
|
"for img in os.listdir(img_dir):\n",
|
||||||
|
" if img.endswith('jpg') or img.endswith('jpeg'):\n",
|
||||||
|
" img_path = os.path.join(img_dir, img)\n",
|
||||||
|
" imgs.append(img_path)"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"collapsed": false,
|
||||||
|
"pycharm": {
|
||||||
|
"name": "#%%\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"output_type": "error",
|
||||||
|
"ename": "NameError",
|
||||||
|
"evalue": "name 'imgs' is not defined",
|
||||||
|
"traceback": [
|
||||||
|
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||||
|
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||||
|
"\u001b[0;32m<ipython-input-2-5961b116db92>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimgs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;31m# im.show()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||||||
|
"\u001b[0;31mNameError\u001b[0m: name 'imgs' is not defined"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"im = Image.open(imgs[0])\n",
|
||||||
|
"# im.show()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"collapsed": false,
|
||||||
|
"pycharm": {
|
||||||
|
"name": "#%%\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"deepface = DeepFaceLite()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"collapsed": false,
|
||||||
|
"pycharm": {
|
||||||
|
"name": "#%%\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"img, responses = deepface.analyze(im, detector_backend='mtcnn')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"responses"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"name": "pycharm-f9dc5b7",
|
||||||
|
"language": "python",
|
||||||
|
"display_name": "PyCharm (deepface)"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 2
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython2",
|
||||||
|
"version": "3.8.5-final"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 0
|
||||||
|
}
|
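Both tracebacks above share one root cause: the notebook was run in a directory with no test_imgs/ folder, so the first cell raised FileNotFoundError and never defined imgs, and the second cell then failed with NameError. A minimal sketch of those two cells with guards added (the os and PIL imports are assumptions, since the notebook's import cell is not part of this diff):

import os
from PIL import Image  # assumed import; the notebook's import cell is not shown in this diff

imgs = []
img_dir = 'test_imgs/'
if os.path.isdir(img_dir):  # guard against the FileNotFoundError seen above
    for img in os.listdir(img_dir):
        if img.endswith(('jpg', 'jpeg')):
            imgs.append(os.path.join(img_dir, img))

if imgs:  # only open an image when something was actually found
    im = Image.open(imgs[0])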
84
extension/constants.py
Normal file
@ -0,0 +1,84 @@
CONSTANTS = {
    'ANIMAL_TYPE': ['cat', 'dog'],
    'EMOTION': {
        'HAPPY': 'Happy',
        'NEUTRAL': 'Neutral',
        'ANXIOUS': 'Anxious',
        'SAD': 'Sad',
        'UNSETTLED': 'Unsettled'
    },
    'STATISTICAL_DATA': {
        "Cat": {
            "feedback_number": 0,
            "prediction_number": 0,
            "prediction_data": {
                "breed": {},
                "emotion": {
                    "Happy": 0,
                    "Neutral": 0,
                    "Anxious": 0,
                    "Sad": 0,
                    "Unsettled": 0
                }
            },
            "feedback_data": {
                "breed": {
                    "wrong": {},
                    "correct": {}
                },
                "emotion": {
                    "wrong": {
                        "Happy": 0,
                        "Neutral": 0,
                        "Anxious": 0,
                        "Sad": 0,
                        "Unsettled": 0
                    },
                    "correct": {
                        "Happy": 0,
                        "Neutral": 0,
                        "Anxious": 0,
                        "Sad": 0,
                        "Unsettled": 0
                    }
                }
            }
        },
        "Dog": {
            "feedback_number": 0,
            "prediction_number": 0,
            "prediction_data": {
                "breed": {},
                "emotion": {
                    "Happy": 0,
                    "Neutral": 0,
                    "Anxious": 0,
                    "Sad": 0,
                    "Unsettled": 0
                }
            },
            "feedback_data": {
                "breed": {
                    "wrong": {},
                    "correct": {}
                },
                "emotion": {
                    "wrong": {
                        "Happy": 0,
                        "Neutral": 0,
                        "Anxious": 0,
                        "Sad": 0,
                        "Unsettled": 0
                    },
                    "correct": {
                        "Happy": 0,
                        "Neutral": 0,
                        "Anxious": 0,
                        "Sad": 0,
                        "Unsettled": 0
                    }
                }
            }
        }
    }
}
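Note that CONSTANTS['STATISTICAL_DATA'] is a mutable, module-level template. Any code that seeds a per-request or per-day statistics record from it should copy it first; a minimal sketch of the safe pattern (the deepcopy usage is a suggestion, not something shown elsewhere in this diff):

from copy import deepcopy

from extension.constants import CONSTANTS

# Take a fully nested private copy; incrementing counters on the shared dict
# directly would silently corrupt the zeroed template for every later caller.
stats = deepcopy(CONSTANTS['STATISTICAL_DATA'])
stats['Cat']['prediction_number'] += 1
stats['Cat']['prediction_data']['emotion']['Happy'] += 1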
14
extension/utilServices.py
Normal file
@ -0,0 +1,14 @@
from flask import Response
from json import dumps


def send_json_response(body, status):
    """
    format json response
    :param body: dict object going to send
    :param status: response status
    :return: formatted response
    """
    return Response(response=dumps(body),
                    status=status,
                    mimetype="application/json")
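A typical call site, assuming a Flask view that wants to return a JSON body with an explicit status code (the view name and payload are illustrative):

from extension.utilServices import send_json_response


def example_view():  # hypothetical view, for illustration only
    return send_json_response({'status': 'success', 'results': []}, 200)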
BIN
font/FiraMono-Medium.otf
Normal file
Binary file not shown.
45
font/SIL Open Font License.txt
Normal file
@ -0,0 +1,45 @@
Copyright (c) 2014, Mozilla Foundation https://mozilla.org/ with Reserved Font Name Fira Mono.

Copyright (c) 2014, Telefonica S.A.

This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL

-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------

PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others.

The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives.

DEFINITIONS
"Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation.

"Reserved Font Name" refers to any names specified as such after the copyright statement(s).

"Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s).

"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment.

"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software.

PERMISSION & CONDITIONS
Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions:

1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself.

2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user.

3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users.

4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission.

5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software.

TERMINATION
This license becomes null and void if any of the above conditions are not met.

DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
0
models/__init__.py
Normal file
15
models/feedback.py
Normal file
@ -0,0 +1,15 @@
from config import db


class FeedbackContent(db.EmbeddedDocument):
    type = db.StringField(required=True)
    breedFeedback = db.StringField(required=True)
    breedCorrectness = db.BooleanField(required=True)
    emotionFeedback = db.StringField(required=True)
    emotionCorrectness = db.BooleanField(required=True)


class Feedback(db.Document):
    meta = {'collection': 'feedbacks'}
    content = db.EmbeddedDocumentListField('FeedbackContent', required=True)
    date = db.DateTimeField(required=True)
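A minimal sketch of persisting one feedback record with these models (field values are illustrative, and using datetime.utcnow() for the required date field is an assumption about how the controllers, which are not in this diff, populate it):

from datetime import datetime

from models.feedback import Feedback, FeedbackContent

feedback = Feedback(
    content=[FeedbackContent(type='Cat',
                             breedFeedback='Persian',
                             breedCorrectness=True,
                             emotionFeedback='Happy',
                             emotionCorrectness=False)],
    date=datetime.utcnow(),
)
feedback.save()  # writes to the 'feedbacks' collection named in meta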
8
models/prediction.py
Normal file
@ -0,0 +1,8 @@
from config import db


class Prediction(db.Document):
    meta = {'collection': 'predictions'}
    predictionResults = db.ListField(required=True)
    rawPredictionResults = db.ListField(required=True)
    date = db.DateTimeField(required=True)
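The same pattern applies to predictions; a sketch with made-up payloads, since the real shape of predictionResults is produced by controllers that are not part of this diff:

from datetime import datetime

from models.prediction import Prediction

Prediction(
    predictionResults=[{'pet': 'Dog', 'breed': 'Beagle', 'emotion': 'Happy'}],  # illustrative shape
    rawPredictionResults=[[0.1, 0.7, 0.2]],  # illustrative raw scores
    date=datetime.utcnow(),
).save()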
6757
static/css/bootstrap.css
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1
static/css/bootstrap.css.map
Normal file
File diff suppressed because one or more lines are too long
6
static/css/bootstrap.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
1
static/css/bootstrap.min.css.map
Normal file
File diff suppressed because one or more lines are too long
550
static/css/fileinput.css
Normal file
@ -0,0 +1,550 @@
/*!
 * bootstrap-fileinput v5.0.8
 * http://plugins.krajee.com/file-input
 *
 * Krajee default styling for bootstrap-fileinput.
 *
 * Author: Kartik Visweswaran
 * Copyright: 2014 - 2019, Kartik Visweswaran, Krajee.com
 *
 * Licensed under the BSD-3-Clause
 * https://github.com/kartik-v/bootstrap-fileinput/blob/master/LICENSE.md
 */
.file-loading input[type=file], input[type=file].file-loading {
    width: 0;
    height: 0;
}

.file-no-browse {
    position: absolute;
    left: 50%;
    bottom: 20%;
    width: 1px;
    height: 1px;
    font-size: 0;
    opacity: 0;
    border: none;
    background: none;
    outline: none;
    box-shadow: none;
}

.kv-hidden, .file-caption-icon, .file-zoom-dialog .modal-header:before, .file-zoom-dialog .modal-header:after, .file-input-new .file-preview, .file-input-new .close, .file-input-new .glyphicon-file, .file-input-new .fileinput-remove-button, .file-input-new .fileinput-upload-button, .file-input-new .no-browse .input-group-btn, .file-input-ajax-new .fileinput-remove-button, .file-input-ajax-new .fileinput-upload-button, .file-input-ajax-new .no-browse .input-group-btn, .hide-content .kv-file-content, .is-locked .fileinput-upload-button, .is-locked .fileinput-remove-button {
    display: none;
}

.btn-file input[type=file], .file-caption-icon, .file-preview .fileinput-remove, .krajee-default .file-thumb-progress, .file-zoom-dialog .btn-navigate, .file-zoom-dialog .floating-buttons {
    position: absolute;
}

.file-caption-icon .kv-caption-icon {
    line-height: inherit;
}

.file-input, .file-loading:before, .btn-file, .file-caption, .file-preview, .krajee-default.file-preview-frame, .krajee-default .file-thumbnail-footer, .file-zoom-dialog .modal-dialog {
    position: relative;
}

.file-error-message pre, .file-error-message ul, .krajee-default .file-actions, .krajee-default .file-other-error {
    text-align: left;
}

.file-error-message pre, .file-error-message ul {
    margin: 0;
}

.krajee-default .file-drag-handle, .krajee-default .file-upload-indicator {
    float: left;
    margin-top: 10px;
    width: 16px;
    height: 16px;
}

.krajee-default .file-thumb-progress .progress, .krajee-default .file-thumb-progress .progress-bar {
    height: 11px;
    font-family: Verdana, Helvetica, sans-serif;
    font-size: 9px;
}

.krajee-default .file-thumb-progress .progress, .kv-upload-progress .progress {
    background-color: #ccc;
}

.krajee-default .file-caption-info, .krajee-default .file-size-info {
    display: block;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
    width: 160px;
    height: 15px;
    margin: auto;
}

.file-zoom-content > .file-object.type-video, .file-zoom-content > .file-object.type-flash, .file-zoom-content > .file-object.type-image {
    max-width: 100%;
    max-height: 100%;
    width: auto;
}

.file-zoom-content > .file-object.type-video, .file-zoom-content > .file-object.type-flash {
    height: 100%;
}

.file-zoom-content > .file-object.type-pdf, .file-zoom-content > .file-object.type-html, .file-zoom-content > .file-object.type-text, .file-zoom-content > .file-object.type-default {
    width: 100%;
}

.file-loading:before {
    content: " Loading...";
    display: inline-block;
    padding-left: 20px;
    line-height: 16px;
    font-size: 13px;
    font-variant: small-caps;
    color: #999;
    background: transparent url(../img/loading.gif) top left no-repeat;
}

.file-object {
    margin: 0 0 -5px 0;
    padding: 0;
}

.btn-file {
    overflow: hidden;
}

.btn-file input[type=file] {
    top: 0;
    left: 0;
    min-width: 100%;
    min-height: 100%;
    text-align: right;
    opacity: 0;
    background: none repeat scroll 0 0 transparent;
    cursor: inherit;
    display: block;
}

.btn-file ::-ms-browse {
    font-size: 10000px;
    width: 100%;
    height: 100%;
}

.file-caption .file-caption-name {
    width: 100%;
    margin: 0;
    padding: 0;
    box-shadow: none;
    border: none;
    background: none;
    outline: none;
}

.file-caption.icon-visible .file-caption-icon {
    display: inline-block;
}

.file-caption.icon-visible .file-caption-name {
    padding-left: 15px;
}

.file-caption-icon {
    left: 8px;
}

.file-error-message {
    color: #a94442;
    background-color: #f2dede;
    margin: 5px;
    border: 1px solid #ebccd1;
    border-radius: 4px;
    padding: 15px;
}

.file-error-message pre {
    margin: 5px 0;
}

.file-caption-disabled {
    background-color: #eee;
    cursor: not-allowed;
    opacity: 1;
}

.file-preview {
    border-radius: 5px;
    border: 1px solid #ddd;
    padding: 8px;
    width: 100%;
    margin-bottom: 5px;
}

.file-preview .btn-xs {
    padding: 1px 5px;
    font-size: 12px;
    line-height: 1.5;
    border-radius: 3px;
}

.file-preview .fileinput-remove {
    top: 1px;
    right: 1px;
    line-height: 10px;
}

.file-preview .clickable {
    cursor: pointer;
}

.file-preview-image {
    font: 40px Impact, Charcoal, sans-serif;
    color: #008000;
}

.krajee-default.file-preview-frame {
    margin: 8px;
    border: 1px solid rgba(0,0,0,0.2);
    box-shadow: 0 0 10px 0 rgba(0,0,0,0.2);
    padding: 6px;
    float: left;
    text-align: center;
}

.krajee-default.file-preview-frame .kv-file-content {
    width: 213px;
    height: 160px;
}

.krajee-default .file-preview-other-frame {
    display: flex;
    align-items: center;
    justify-content: center;
}

.krajee-default.file-preview-frame .kv-file-content.kv-pdf-rendered {
    width: 400px;
}

.krajee-default.file-preview-frame[data-template="audio"] .kv-file-content {
    width: 240px;
    height: 55px;
}

.krajee-default.file-preview-frame .file-thumbnail-footer {
    height: 70px;
}

.krajee-default.file-preview-frame:not(.file-preview-error):hover {
    border: 1px solid rgba(0,0,0,0.3);
    box-shadow: 0 0 10px 0 rgba(0,0,0,0.4);
}

.krajee-default .file-preview-text {
    display: block;
    color: #428bca;
    border: 1px solid #ddd;
    font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
    outline: none;
    padding: 8px;
    resize: none;
}

.krajee-default .file-preview-html {
    border: 1px solid #ddd;
    padding: 8px;
    overflow: auto;
}

.krajee-default .file-other-icon {
    font-size: 6em;
    line-height: 1;
}

.krajee-default .file-footer-buttons {
    float: right;
}

.krajee-default .file-footer-caption {
    display: block;
    text-align: center;
    padding-top: 4px;
    font-size: 11px;
    color: #777;
    margin-bottom: 30px;
}

.file-upload-stats {
    font-size: 10px;
    text-align: center;
    width: 100%;
}

.kv-upload-progress .file-upload-stats {
    font-size: 12px;
    margin: -10px 0 5px;
}

.krajee-default .file-preview-error {
    opacity: 0.65;
    box-shadow: none;
}

.krajee-default .file-thumb-progress {
    height: 11px;
    top: 37px;
    left: 0;
    right: 0;
}

.krajee-default.kvsortable-ghost {
    background: #e1edf7;
    border: 2px solid #a1abff;
}

.krajee-default .file-preview-other:hover {
    opacity: 0.8;
}

.krajee-default .file-preview-frame:not(.file-preview-error) .file-footer-caption:hover {
    color: #000;
}

.kv-upload-progress .progress {
    height: 20px;
    margin: 10px 0;
    overflow: hidden;
}

.kv-upload-progress .progress-bar {
    height: 20px;
    font-family: Verdana, Helvetica, sans-serif;
}

/*noinspection CssOverwrittenProperties*/
.file-zoom-dialog .file-other-icon {
    font-size: 22em;
    font-size: 50vmin;
}

.file-zoom-dialog .modal-dialog {
    width: auto;
}

.file-zoom-dialog .modal-header {
    display: flex;
    align-items: center;
    justify-content: space-between;
}

.file-zoom-dialog .btn-navigate {
    padding: 0;
    margin: 0;
    background: transparent;
    text-decoration: none;
    outline: none;
    opacity: 0.7;
    top: 45%;
    font-size: 4em;
    color: #1c94c4;
}

.file-zoom-dialog .btn-navigate:not([disabled]):hover {
    outline: none;
    box-shadow: none;
    opacity: 0.6;
}

.file-zoom-dialog .floating-buttons {
    top: 5px;
    right: 10px;
}

.file-zoom-dialog .btn-navigate[disabled] {
    opacity: 0.3;
}

.file-zoom-dialog .btn-prev {
    left: 1px;
}

.file-zoom-dialog .btn-next {
    right: 1px;
}

.file-zoom-dialog .kv-zoom-title {
    font-weight: 300;
    color: #999;
    max-width: 50%;
    overflow: hidden;
    white-space: nowrap;
    text-overflow: ellipsis;
}

.file-input-new .no-browse .form-control {
    border-top-right-radius: 4px;
    border-bottom-right-radius: 4px;
}

.file-input-ajax-new .no-browse .form-control {
    border-top-right-radius: 4px;
    border-bottom-right-radius: 4px;
}

.file-caption-main {
    width: 100%;
}

.file-thumb-loading {
    background: transparent url(../img/loading.gif) no-repeat scroll center center content-box !important;
}

.file-drop-zone {
    border: 1px dashed #aaa;
    border-radius: 4px;
    text-align: center;
    vertical-align: middle;
    margin: 12px 15px 12px 12px;
    padding: 5px;
}

.file-drop-zone.clickable:hover {
    border: 2px dashed #999;
}

.file-drop-zone.clickable:focus {
    border: 2px solid #5acde2;
}

.file-drop-zone .file-preview-thumbnails {
    cursor: default;
}

.file-drop-zone-title {
    color: #aaa;
    font-size: 1.6em;
    padding: 85px 10px;
    cursor: default;
}

.file-highlighted {
    border: 2px dashed #999 !important;
    background-color: #eee;
}

.file-uploading {
    background: url(../img/loading-sm.gif) no-repeat center bottom 10px;
    opacity: 0.65;
}

.file-zoom-fullscreen .modal-dialog {
    min-width: 100%;
    margin: 0;
}

.file-zoom-fullscreen .modal-content {
    border-radius: 0;
    box-shadow: none;
    min-height: 100vh;
}

.file-zoom-fullscreen .modal-body {
    overflow-y: auto;
}

.floating-buttons {
    z-index: 3000;
}

.floating-buttons .btn-kv {
    margin-left: 3px;
    z-index: 3000;
}

.kv-zoom-actions .btn-kv {
    margin-left: 3px;
}

.file-zoom-content {
    height: 480px;
    text-align: center;
}

.file-zoom-content .file-preview-image {
    max-height: 100%;
}

.file-zoom-content .file-preview-video {
    max-height: 100%;
}

.file-zoom-content > .file-object.type-image {
    height: auto;
    min-height: inherit;
}

.file-zoom-content > .file-object.type-audio {
    width: auto;
    height: 30px;
}

@media (min-width: 576px) {
    .file-zoom-dialog .modal-dialog {
        max-width: 500px;
    }
}

@media (min-width: 992px) {
    .file-zoom-dialog .modal-lg {
        max-width: 800px;
    }
}

@media (max-width: 767px) {
    .file-preview-thumbnails {
        display: flex;
        justify-content: center;
        align-items: center;
        flex-direction: column;
    }

    .file-zoom-dialog .modal-header {
        flex-direction: column;
    }
}

@media (max-width: 350px) {
    .krajee-default.file-preview-frame:not([data-template="audio"]) .kv-file-content {
        width: 160px;
    }
}

@media (max-width: 420px) {
    .krajee-default.file-preview-frame .kv-file-content.kv-pdf-rendered {
        width: 100%;
    }
}

.file-loading[dir=rtl]:before {
    background: transparent url(../img/loading.gif) top right no-repeat;
    padding-left: 0;
    padding-right: 20px;
}

.file-sortable .file-drag-handle {
    cursor: move;
    opacity: 1;
}

.file-sortable .file-drag-handle:hover {
    opacity: 0.7;
}

.clickable .file-drop-zone-title {
    cursor: pointer;
}

.file-preview-initial.sortable-chosen {
    background-color: #d9edf7;
}
12
static/css/fileinput.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
59
static/css/homePage.css
Normal file
@ -0,0 +1,59 @@
div.file-drop-zone {
    height: 550px;
}
div.file-drop-zone-title {
    height: 520px;
    text-align: center;
    vertical-align: middle; /* 'center' is not a valid vertical-align value */
    line-height: 320px;
}
.img-thumbnail {
    padding: 18px;
    box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
}
.text_box_title {
    font-size: 35px;
    font-weight: bold;
}
hr {
    margin-top: 10px;
    border-top: 1px solid darkgray;
}
.response_title {
    font-weight: bold;
    color: darkslategray;
    margin-top: 15px;
}
.response_number {
    color: dimgray;
    margin-left: 20px;
}
.file-preview-thumbnails {
    margin: 10%;
    height: 80%;
}
.krajee-default.file-preview-frame {
    margin: 2%;
    height: 96%;
    width: 96%;
}
.krajee-default.file-preview-frame .kv-file-content {
    width: 100%;
    height: 81%;
}
.krajee-default.file-preview-frame .file-thumbnail-footer {
    height: 40px;
}
.file-preview-image {
    width: 88% !important;
    height: 88% !important;
    margin: 6%;
    object-fit: contain;
}
.krajee-default .file-footer-caption {
    margin-bottom: 10px;
}
#predicted-img {
    width: 100%;
    height: 81%;
}
BIN
static/img/cat_smile.jpg
Normal file
Binary file not shown.
After: 66 KiB
BIN
static/img/dog_smile.jpg
Normal file
Binary file not shown.
After: 151 KiB
BIN
static/img/loading-sm.gif
Normal file
Binary file not shown.
After: 2.6 KiB
BIN
static/img/loading.gif
Normal file
Binary file not shown.
After: 847 B
2377
static/js/bootstrap.js
vendored
Normal file
File diff suppressed because it is too large
Load Diff
7
static/js/bootstrap.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
5746
static/js/fileinput.js
Normal file
File diff suppressed because it is too large
Load Diff
13
static/js/fileinput.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
2
static/js/jQuery.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
62
templates/base.html
Normal file
@ -0,0 +1,62 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Pet Smile</title>
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
          integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
    <link href="/static/css/fileinput.min.css" media="all" rel="stylesheet" type="text/css"/>
    <link rel="stylesheet" href="/static/css/homePage.css" type="text/css">
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
            integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
            crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
            integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
            crossorigin="anonymous"></script>
    <script src="/static/js/fileinput.js"></script>
    {# chart js #}
    <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.5.0/Chart.min.js"></script>
</head>
<body style="background: rgb(246, 247, 249); margin-bottom: 20px">
<!-- main -->
<nav class="navbar navbar-expand-lg navbar-light bg-light" style="background-color: #e3f2fd; margin-bottom: 30px; width: 100%">
    <a class="navbar-brand" href="/">
        <div style="font-size: 45px; margin-left: 30px">
            <span style="font-weight: bold">Happy</span>Pet
        </div>
    </a>
    <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarNavAltMarkup"
            aria-controls="navbarNavAltMarkup" aria-expanded="false" aria-label="Toggle navigation">
        <span class="navbar-toggler-icon"></span>
    </button>
    <div class="collapse navbar-collapse" id="navbarNavAltMarkup">
        <div class="navbar-nav">
            <a class="nav-item nav-link active" href="/">Home</a>
            <a class="nav-item nav-link" href="/breed-list">Breed Info</a>
            <a class="nav-item nav-link" href="/statistical-data">Statistical Results</a>
        </div>
    </div>
</nav>
{% block mainContent %}{% endblock %}
<div style="text-align: center; font-weight: lighter; margin-top: 20px">
    <div id="app-developBy"></div>
    <div id="app-description"></div>
    <div id="app-contact"></div>
</div>
<script>
    function getAppInfo() {
        $.get("/api/get-app-info", function (response) {
            const appDevelopByBlock = document.getElementById('app-developBy')
            const appDescriptionBlock = document.getElementById('app-description')
            const appContactBlock = document.getElementById('app-contact')
            appDevelopByBlock.innerHTML = response['developedByHTML']
            appDescriptionBlock.innerHTML = response['descriptionHTML']
            appContactBlock.innerHTML = response['contactHTML']
        });
    }

    window.onload = getAppInfo
</script>
</body>
</html>
32
templates/breedPage.html
Normal file
@ -0,0 +1,32 @@
{% extends 'base.html' %}
{% block mainContent %}
    <div id="breed-table-area" style="margin-left: 50px; margin-right: 50px;">
    </div>
    <script>
        function getBreedList() {
            $.get("/api/get-breed-info", function (response) {
                let breedTableArea = document.getElementById('breed-table-area')
                breedTableArea.innerHTML = ''
                let htmlContent = ''

                for (let animalType in response) {
                    htmlContent += '<h2 style="text-align: center">' + animalType + '</h2>' + '<table class="table table-striped"><thead><tr><th scope="col">Breed</th><th scope="col">Image</th><th scope="col">Description</th><th scope="col">URL</th></tr></thead><tbody>'

                    response[animalType].forEach(eachBreed => {
                        htmlContent += '<tr>'
                        htmlContent += '<td>' + eachBreed['breed'] + '</td>'
                        htmlContent += `<td><img src="${eachBreed['image']}" alt="${eachBreed['breed']}" style="height: 100px; width: 100px"></td>`
                        htmlContent += '<td>' + eachBreed['description'] + '</td>'
                        htmlContent += `<td><a href="${eachBreed['link']}" target="_blank">wikipedia link</a></td></tr>`
                    })

                    htmlContent += '</tbody></table>'
                }

                htmlContent += '<p style="text-align: end">(all results are downloaded from wikipedia)</p>'
                breedTableArea.innerHTML = htmlContent;
            });
        }
        getBreedList();
    </script>
{% endblock %}
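For reference, the response shape this page expects from /api/get-breed-info, inferred from the loop above (the controller itself is not part of this diff; values are illustrative):

# animal type -> list of breed entries, as consumed by breedPage.html
breed_info = {
    'Cat': [
        {'breed': 'Persian',
         'image': 'https://example.org/persian.jpg',
         'description': 'A long-haired breed of cat.',
         'link': 'https://en.wikipedia.org/wiki/Persian_cat'},
    ],
    'Dog': [],
}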
90
templates/homePage.html
Normal file
@ -0,0 +1,90 @@
{% extends 'base.html' %}
{% block mainContent %}
    <div class="container">
        <div class="row" style="align-self: center">
            <!-- left col -->
            <div class="col-lg-6 col-sm-12">
                <input id="file-input" type="file" class="file input-lg" data-preview-file-type="text">
            </div>
            <!-- right col -->
            <div class="col-lg-6 col-sm-12" id="div_prediction">
                <div class="card">
                    <!-- predicted image -->
                    <img class="card-img-top" id="predicted-img" alt="predicted picture">
                    <div class="card-body">
                        <!-- prediction results -->
                        <div class="card-text" id="predicted-results"></div>
                    </div>
                </div>
            </div>
        </div>
        <div style="text-align: center; font-weight: lighter; margin-top: 20px">
            <p id="app-developBy"></p>
            <p id="app-description"></p>
            <p id="app-contact"></p>
        </div>
    </div>
    <!-- loading gif -->
    <img src="static/img/loading-sm.gif" id="loading_image" alt="loading"
         style="position: absolute; left: 73%; top: 48%; z-index: 2; width: 25px">
    <script>
        $("#file-input").fileinput({'showUpload': true, 'previewFileType': 'any', 'autoOrientImage': false});
        // hide prediction and loading image at beginning
        $("#loading_image").fadeOut(0);
        $("#div_prediction").fadeOut(0);

        // when clicking upload
        $('.fileinput-upload-button').click(function (event) {
            $("#div_prediction").fadeOut(1000);
            setTextEmpty();
            $('#loading_image').fadeIn(1500);
            sendMessage();
        });

        // when clicking remove
        $('.fileinput-remove-button').click(function (event) {
            $("#div_prediction").fadeOut(1000);
            setTextEmpty();
        });

        // send image to server and get response
        function sendMessage() {
            let dataURL = $('.file-preview-image').attr("src");
            let base64Image = dataURL.replace("data:image/jpeg;base64,", "");
            base64Image = base64Image.replace("data:image/png;base64,", "");
            let message = {
                image: base64Image
            }

            $.post("/api/predict", JSON.stringify(message), function (response) {
                let resultArea = document.getElementById('predicted-results')
                resultArea.innerHTML = ''
                let predictedImg = document.getElementById('predicted-img')
                // a data URI needs one concrete MIME type ('image/(jpeg|png)' is not valid);
                // the backend is assumed to return a JPEG-encoded img_str
                predictedImg.src = 'data:image/jpeg;base64,' + response['img_str']
                $('#loading_image').fadeOut(300);
                $("#div_prediction").fadeIn(1500);
                if (response['status'] === 'success') {
                    for (let i = 0; i < response['results'].length; i++) {
                        const addedId = `<div style="border: 1px dashed darkgrey"><ul><li><strong>ID: </strong>${response['results'][i].id}`
                        const emotionList = `<div style="margin: 10px 30px; font-weight: lighter;"><ul><li>${response['results'][i].emotion['allEmotions'][0]}</li><li>${response['results'][i].emotion['allEmotions'][1]}</li><li>${response['results'][i].emotion['allEmotions'][2]}</li><li>${response['results'][i].emotion['allEmotions'][3]}</li><li>${response['results'][i].emotion['allEmotions'][4]}</li></ul></div>`
                        const otherInfo = `</li><li><strong>Type: </strong>${response['results'][i].pet}</li><li><strong>Breed: </strong>${response['results'][i].breed}</li><li><strong>Emotion: </strong>${response['results'][i].emotion['mostLikely']}</li></ul>${emotionList}</div>`
                        const addedHtml = response['isShowId'] !== 'false' ? addedId + otherInfo : otherInfo
                        resultArea.innerHTML += addedHtml
                    }
                } else {
                    resultArea.innerHTML += response['message']
                }
            });
        }

        // set emotion and breed text to empty
        function setTextEmpty() {
            let resultArea = document.getElementById('predicted-results')
            resultArea.innerHTML = ''
        }
    </script>
{% endblock %}
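The /api/predict handler is not included in this commit; a minimal sketch of the decoding step it needs on the Flask side, given the {image: <base64>} body posted above (the helper name is hypothetical):

import base64
import io

from PIL import Image


def decode_upload(payload):  # hypothetical helper; payload is the parsed JSON body
    """Turn the {'image': <base64 string>} body posted by homePage.html into a PIL image."""
    img_bytes = base64.b64decode(payload['image'])
    return Image.open(io.BytesIO(img_bytes))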
263
templates/statisticalPage.html
Normal file
@ -0,0 +1,263 @@
{% extends 'base.html' %}
{% block mainContent %}
    <div class="container">
        <h1>General information: </h1>
        <div class="row">
            <div style="width: 800px; margin: 5px auto;" class="col-lg-6 col-sm-12">
                <canvas id="numberOfTotalPrediction" width="8" height="5"></canvas>
            </div>

            <div style="width: 800px; margin: 5px auto;" class="col-lg-6 col-sm-12">
                <canvas id="numberOfTotalFeedback" width="8" height="5"></canvas>
            </div>
        </div>
        <br/>
        <div style="width: 800px; margin: 5px auto;">
            <canvas id="numberOfPhotoByDate" width="8" height="5"></canvas>
        </div>
        <br/>
        <h1>Cat: </h1>
        <div style="width: 800px; margin: 5px auto">
            <canvas id="numberOfCatBreedPredictionChart" width="8" height="5"></canvas>
        </div>
        <br/>
        <div style="width: 800px; margin: 5px auto">
            <canvas id="numberOfCatBreedFeedbackChart" width="8" height="5"></canvas>
        </div>
        <br/>
        <h1>Dog: </h1>
        <div style="overflow-x: auto">
            <div style="width: 1300px; margin: 5px auto; overflow-x: auto">
                <canvas id="numberOfDogBreedPredictionChart" width="9" height="5"></canvas>
            </div>
        </div>
        <br/>
        <div style="overflow-x: auto">
            <div style="width: 1300px; margin: 5px auto; overflow-x: auto">
                <canvas id="numberOfDogBreedFeedbackChart" width="9" height="5"></canvas>
            </div>
        </div>
        <br/>
        <h1>Emotion: </h1>
        <div style="width: 800px; margin: 5px auto;">
            <canvas id="numberOfEmotionPredictionChart" width="9" height="5"></canvas>
        </div>
        <br/>
        <div class="row">
            <div class="col-lg-6 col-sm-12" style="width: 800px; margin: 5px auto;">
                <canvas id="catEmotionFeedbackChart" width="9" height="5"></canvas>
            </div>
            <div class="col-lg-6 col-sm-12" style="width: 800px; margin: 5px auto;">
                <canvas id="dogEmotionFeedbackChart" width="9" height="5"></canvas>
            </div>
        </div>
    </div>
    <script>
        function generalBackgroundColor(number) {
            let result = []
            for (let i = 0; i < number; i++) {
                // alpha in rgba() must be in [0, 1]; a 0-255 value is out of range
                let color = "rgba(" + Math.floor(Math.random() * 256) + "," + Math.floor(Math.random() * 256) + "," + Math.floor(Math.random() * 256) + "," + (0.5 + Math.random() * 0.5).toFixed(2) + ")"
                result.push(color)
            }
            return result
        }

        function drawDoughnut(canvas, data, title) {
            new Chart(canvas, {
                type: 'doughnut',
                data: {
                    labels: Object.keys(data),
                    datasets: [{
                        data: Object.values(data),
                        backgroundColor: ['rgb(255, 164, 128)', 'rgb(40, 143, 56)'],
                    }]
                },
                options: {
                    title: {display: true, text: title, fontSize: 30},
                }
            });
        }

        function drawLine(canvas, data, title) {
            new Chart(canvas, {
                type: 'line',
                data: {
                    labels: Object.keys(data),
                    datasets: [{
                        data: Object.values(data),
                        label: 'Number',
                        borderColor: "#3e95cd",
                        fill: false
                    }]
                },
                options: {
                    title: {display: true, text: title, fontSize: 30},
                }
            });
        }

        function drawVerticalBar(canvas, data, title) {
            new Chart(canvas, {
                type: 'bar',
                data: {
                    labels: Object.keys(data),
                    datasets: [{
                        label: 'Number',
                        data: Object.values(data),
                        backgroundColor:
                            generalBackgroundColor(Object.keys(data).length),
                    }]
                },
                options: {
                    title: {display: true, text: title, fontSize: 30},
                    legend: {display: false},
                    scales: {
                        yAxes: [{
                            ticks: {
                                beginAtZero: true,
                                fontSize: 20
                            }
                        }],
                        xAxes: [{
                            ticks: {
                                autoSkip: false,
                                fontSize: 20
                            }
                        }]
                    },
                }
            });
        }

        function drawVerticalStackBar(canvas, dataA, dataB, title, labels) {
            new Chart(canvas, {
                type: 'bar',
                data: {
                    labels: Object.keys(dataA),
                    datasets: [{
                        label: labels[0],
                        data: Object.values(dataA),
                        backgroundColor:
                            generalBackgroundColor(1)[0],
                    }, {
                        label: labels[1],
                        data: Object.values(dataB),
                        backgroundColor:
                            generalBackgroundColor(1)[0],
                    }]
                },
                options: {
                    title: {display: true, text: title, fontSize: 30},
                    legend: {display: true},
                    scales: {
                        xAxes: [{
                            stacked: true, ticks: {
                                autoSkip: false,
                                fontSize: 20
                            }
                        }],
                        yAxes: [{
                            stacked: true, ticks: {
                                autoSkip: false,
                                fontSize: 20
                            }
                        }]
                    }
                }
            });
        }

        function drawComparisonBar(canvas, labels, dataA, dataB, title) {
            new Chart(canvas, {
                type: 'bar',
                data: {
                    labels: labels,
                    datasets: [{
                        label: 'Cat',
                        data: Object.values(dataA),
                        backgroundColor:
                            'rgba(255, 99, 132, 0.2)',
                        order: 1
                    },
                    {
                        label: 'Dog',
                        data: Object.values(dataB),
                        type: 'bar',
                        backgroundColor:
                            'rgba(54, 162, 235, 0.2)',
                        order: 2
                    }]
                },
                options: {
                    title: {display: true, text: title},
                    legend: {display: true},
                    scales: {
                        yAxes: [{
                            ticks: {
                                beginAtZero: true,
                            }
                        }]
                    },
                }
            });
        }

        function getTotalPrediction() {
            $.get("/api/get-statistical-results", function (response) {

                {# total prediction number #}
                let ctxForTotalPredictionNumber = document.getElementById('numberOfTotalPrediction').getContext('2d');
                let totalPredictionNumberData = {
                    "Cat": response['totalNumberOfCatPrediction'],
                    "Dog": response['totalNumberOfDogPrediction']
                }
                let totalPredictionCount = response['totalNumberOfCatPrediction'] + response['totalNumberOfDogPrediction']
                let totalNumberOfPhotoUploaded = response['totalNumberOfPhotoUploaded']
                drawDoughnut(ctxForTotalPredictionNumber, totalPredictionNumberData, `Total prediction number: ${totalPredictionCount} pets / ${totalNumberOfPhotoUploaded} photos`)

                {# total feedback number #}
                let ctxForTotalFeedbackNumber = document.getElementById('numberOfTotalFeedback').getContext('2d');
                let totalFeedbackNumberData = {
                    "Cat": response['totalNumberOfCatFeedback'],
                    "Dog": response['totalNumberOfDogFeedback']
                }
                let totalFeedbackCount = response['totalNumberOfCatFeedback'] + response['totalNumberOfDogFeedback']
                drawDoughnut(ctxForTotalFeedbackNumber, totalFeedbackNumberData, `Total feedback number: ${totalFeedbackCount}`)

                {# number of photo by date #}
                let ctxForNumberOfPhotoByDate = document.getElementById('numberOfPhotoByDate').getContext('2d');
                drawLine(ctxForNumberOfPhotoByDate, response['numberOfPhotoByDate'], 'Number of Photos within a week')

                {# cat breed prediction #}
                let ctxForCatBreedPredictionNumber = document.getElementById('numberOfCatBreedPredictionChart').getContext('2d');
                drawVerticalBar(ctxForCatBreedPredictionNumber, response['totalNumberOfCatBreedPrediction'], "Cat breed prediction")

                {# cat breed feedback #}
                let ctxForCatBreedFeedback = document.getElementById('numberOfCatBreedFeedbackChart').getContext('2d');
                drawVerticalStackBar(ctxForCatBreedFeedback, response['totalNumberOfCatBreedCorrectFeedback'], response['totalNumberOfCatBreedWrongFeedback'], "Cat breed feedback", ['Correct', 'Wrong'])

                {# dog breed prediction #}
                let ctxForDogBreedPredictionNumber = document.getElementById('numberOfDogBreedPredictionChart').getContext('2d');
                drawVerticalBar(ctxForDogBreedPredictionNumber, response['totalNumberOfDogBreedPrediction'], "Dog breed prediction")

                {# dog breed feedback #}
                let ctxForDogBreedFeedback = document.getElementById('numberOfDogBreedFeedbackChart').getContext('2d');
                drawVerticalStackBar(ctxForDogBreedFeedback, response['totalNumberOfDogBreedCorrectFeedback'], response['totalNumberOfDogBreedWrongFeedback'], "Dog breed feedback", ['Correct', 'Wrong'])

                {# emotion prediction #}
                let ctxForEmotionPredictionNumber = document.getElementById('numberOfEmotionPredictionChart').getContext('2d');
                drawVerticalStackBar(ctxForEmotionPredictionNumber, response['totalNumberOfCatEmotionPrediction'], response['totalNumberOfDogEmotionPrediction'], "Emotion prediction", ['Cat', 'Dog'])

                {# cat emotion feedback #}
                let ctxForCatEmotionFeedback = document.getElementById('catEmotionFeedbackChart').getContext('2d');
                drawVerticalStackBar(ctxForCatEmotionFeedback, response['totalNumberOfCatEmotionCorrectFeedback'], response['totalNumberOfCatEmotionWrongFeedback'], "Emotion feedback for cat", ['Correct', 'Wrong'])

                {# dog emotion feedback #}
                let ctxForDogEmotionFeedback = document.getElementById('dogEmotionFeedbackChart').getContext('2d');
                drawVerticalStackBar(ctxForDogEmotionFeedback, response['totalNumberOfDogEmotionCorrectFeedback'], response['totalNumberOfDogEmotionWrongFeedback'], "Emotion feedback for dog", ['Correct', 'Wrong'])
            });
        }

        getTotalPrediction()
    </script>
{% endblock %}
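For reference, the keys this page reads from /api/get-statistical-results, collected from the calls above (the controller producing them is not in this diff; the zero and empty-dict values are placeholders):

statistical_response = {
    'totalNumberOfCatPrediction': 0,
    'totalNumberOfDogPrediction': 0,
    'totalNumberOfPhotoUploaded': 0,
    'totalNumberOfCatFeedback': 0,
    'totalNumberOfDogFeedback': 0,
    'numberOfPhotoByDate': {},  # date string -> count
    'totalNumberOfCatBreedPrediction': {},  # breed -> count
    'totalNumberOfCatBreedCorrectFeedback': {},
    'totalNumberOfCatBreedWrongFeedback': {},
    'totalNumberOfDogBreedPrediction': {},
    'totalNumberOfDogBreedCorrectFeedback': {},
    'totalNumberOfDogBreedWrongFeedback': {},
    'totalNumberOfCatEmotionPrediction': {},  # emotion -> count
    'totalNumberOfDogEmotionPrediction': {},
    'totalNumberOfCatEmotionCorrectFeedback': {},
    'totalNumberOfCatEmotionWrongFeedback': {},
    'totalNumberOfDogEmotionCorrectFeedback': {},
    'totalNumberOfDogEmotionWrongFeedback': {},
}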
4
views/__init__.py
Normal file
@ -0,0 +1,4 @@
from views.public_route import public_route_bp

__all__ = ['public_route_bp']
18
views/public_route.py
Normal file
@ -0,0 +1,18 @@
from flask import Blueprint, render_template

public_route_bp = Blueprint('public_route', __name__)


@public_route_bp.route('/')
def render_homepage():
    return render_template("homePage.html")


@public_route_bp.route('/breed-list')
def render_breed_info_page():
    return render_template('breedPage.html')


@public_route_bp.route('/statistical-data')
def render_statistical_page():
    return render_template('statisticalPage.html')
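A quick smoke test for these three routes using Flask's built-in test client (a sketch, not part of this commit; it assumes the templates/ directory sits alongside app.py and that the MongoDB settings resolve at import time):

from app import app  # the Flask instance wired up in app.py


def test_public_routes():
    client = app.test_client()
    for path in ('/', '/breed-list', '/statistical-data'):
        assert client.get(path).status_code == 200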
278
yolov3_with_emo.py
Normal file
278
yolov3_with_emo.py
Normal file
@ -0,0 +1,278 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
Class definition of YOLO_v3 style detection model on image and video
|
||||||
|
"""
|
||||||
|
|
||||||
|
import colorsys
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from extension.constants import CONSTANTS
|
||||||
|
import numpy as np
|
||||||
|
from keras import backend as K
|
||||||
|
from keras.models import load_model
|
||||||
|
from keras.layers import Input
|
||||||
|
from PIL import Image, ImageFont, ImageDraw
|
||||||
|
|
||||||
|
from yolo3.model import yolo_eval, yolo_body
|
||||||
|
from yolo3.utils import letterbox_image
|
||||||
|
import os
|
||||||
|
from keras.utils import multi_gpu_model
|
||||||
|
|
||||||
|
# for tensorflow 2.0 emotion model
|
||||||
|
import tensorflow as tf
|
||||||
|
from tensorflow.keras.applications.xception import preprocess_input
|
||||||
|
from keras.preprocessing import image as keras_Image
|
||||||
|
|
||||||
|
# nms code from deepsort
|
||||||
|
from yolo3.preprocessing import non_max_suppression
|
||||||
|
|
||||||
|
|
||||||
|
class YOLO(object):
|
||||||
|
emotion_dict = {0: CONSTANTS['EMOTION']['UNSETTLED'], 1: CONSTANTS['EMOTION']['HAPPY'],
|
||||||
|
2: CONSTANTS['EMOTION']['NEUTRAL'], 3: CONSTANTS['EMOTION']['SAD'],
|
||||||
|
4: CONSTANTS['EMOTION']['ANXIOUS']}
|
||||||
|
|
||||||
|
_defaults = {
|
||||||
|
"model_path": 'yolov3_all/trained_weights_final.h5',
|
||||||
|
"anchors_path": 'model_data/yolo3_anchors.txt',
|
||||||
|
"classes_path": 'model_data/pet_classes.txt',
|
||||||
|
"emotion_model_dir": "emotion/",
|
||||||
|
"score": 0.3,
|
||||||
|
"iou": 0.45,
|
||||||
|
"model_image_size": (608, 608),
|
||||||
|
"gpu_num": 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_defaults(cls, n):
|
||||||
|
if n in cls._defaults:
|
||||||
|
return cls._defaults[n]
|
||||||
|
else:
|
||||||
|
return "Unrecognized attribute name '" + n + "'"
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.__dict__.update(self._defaults) # set up default values
|
||||||
|
self.__dict__.update(kwargs) # and update with user overrides
|
||||||
|
self.class_names = self._get_class()
|
||||||
|
self.anchors = self._get_anchors()
|
||||||
|
self.sess = K.get_session()
|
||||||
|
|
||||||
|
# init breed model
|
||||||
|
self.boxes, self.scores, self.classes = self.generate()
|
||||||
|
|
||||||
|
# init emotion models
|
||||||
|
self.cat_emotion_model, self.dog_emotion_model = self.load_emotion_models()
|
||||||
|
|
||||||
|
def _get_class(self):
|
||||||
|
classes_path = os.path.expanduser(self.classes_path)
|
||||||
|
with open(classes_path) as f:
|
||||||
|
class_names = f.readlines()
|
||||||
|
class_names = [c.strip() for c in class_names]
|
||||||
|
return class_names
|
||||||
|
|
||||||
|
def _get_anchors(self):
|
||||||
|
anchors_path = os.path.expanduser(self.anchors_path)
|
||||||
|
with open(anchors_path) as f:
|
||||||
|
anchors = f.readline()
|
||||||
|
anchors = [float(x) for x in anchors.split(',')]
|
||||||
|
return np.array(anchors).reshape(-1, 2)
|
||||||
|
|
||||||
|
def load_emotion_models(self):
|
||||||
|
cat_model_path = os.path.join(self.emotion_model_dir, "Cat_classifier.h5")
|
||||||
|
dog_model_path = os.path.join(self.emotion_model_dir, "Dog_classifier.h5")
|
||||||
|
|
||||||
|
cat = tf.keras.models.load_model(cat_model_path)
|
||||||
|
self.graph_cat = tf.get_default_graph()
|
||||||
|
|
||||||
|
dog = tf.keras.models.load_model(dog_model_path)
|
||||||
|
self.graph_dog = tf.get_default_graph()
|
||||||
|
|
||||||
|
cat.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
||||||
|
dog.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
||||||
|
|
||||||
|
return cat, dog
|
||||||
|
|
||||||
|
def generate(self):
|
||||||
|
model_path = os.path.expanduser(self.model_path)
|
||||||
|
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
|
||||||
|
|
||||||
|
# Load model, or construct model and load weights.
|
||||||
|
num_anchors = len(self.anchors)
|
||||||
|
num_classes = len(self.class_names)
|
||||||
|
try:
|
||||||
|
self.yolo_model = load_model(model_path, compile=False)
|
||||||
|
except:
|
||||||
|
self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
|
||||||
|
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
|
||||||
|
else:
|
||||||
|
assert self.yolo_model.layers[-1].output_shape[-1] == \
|
||||||
|
num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
|
||||||
|
'Mismatch between model and given anchor and class sizes'
|
||||||
|

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2,))
        if self.gpu_num >= 2:
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes
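
    # The boxes/scores/classes returned by generate() are symbolic tensors;
    # they are only evaluated in detect_image() via self.sess.run(), with the
    # original image height/width fed through the input_image_shape
    # placeholder so boxes come back in original-image coordinates.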

    def detect_image(self, image):

        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
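
        # letterbox_image scales the image with its aspect ratio preserved and
        # pads the remainder with gray, so the network always receives a
        # model_image_size input regardless of the original shape.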

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]]
                # K.learning_phase(): 0
            })

        # non-maxima suppression ACROSS classes
        indices = non_max_suppression(np.array(out_boxes),
                                      self.iou,
                                      out_scores)

        out_boxes = [out_boxes[i] for i in indices]
        out_scores = [out_scores[i] for i in indices]
        out_classes = [out_classes[i] for i in indices]
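
        # non_max_suppression (defined elsewhere in this project) is applied
        # class-agnostically here: assuming a standard greedy implementation,
        # boxes are kept in descending score order and any remaining box whose
        # overlap with a kept box exceeds self.iou is dropped, so one pet
        # yields one detection even if several breed classes fire on it.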

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        detection_dicts = []

        count = 0

        showid = len(out_classes) > 1

        for i, c in reversed(list(enumerate(out_classes))):

            # prediction of breed and bounding box
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            # bounding box corners
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            # crop the detected head region and prepare it for the emotion model
            head_image = image.crop((left, top, right, bottom))
            head_image = head_image.resize((200, 200))
            head_image = keras_Image.img_to_array(head_image)
            head_image = np.expand_dims(head_image, axis=0)
            head_image = preprocess_input(head_image)

            if predicted_class[0].islower():
                # lowercase: dog
                with self.graph_dog.as_default():
                    prediction = self.dog_emotion_model.predict(head_image)[0]
            else:
                # uppercase: cat
                with self.graph_cat.as_default():
                    prediction = self.cat_emotion_model.predict(head_image)[0]
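
            # The case convention is assumed to follow the Oxford-IIIT Pet
            # dataset: cat breeds are capitalized ("Abyssinian") and dog
            # breeds lowercase ("beagle"), so the first character's case
            # selects the emotion model.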

            prediction = np.round(prediction, decimals=2)
            # prediction = [float(np.round(p * 100, 2)) for p in prediction]
            prediction_ = np.argmax(prediction)

            # label
            label = 'ID: {}'.format(count)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font=font)

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for t in range(thickness):
                draw.rectangle(
                    [left + t, top + t, right - t, bottom - t],
                    outline=self.colors[c])

            if showid:
                draw.rectangle(
                    [tuple(text_origin), tuple(text_origin + label_size)],
                    fill=self.colors[c]
                )
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

            # put results into dict list
            if predicted_class[0].isupper():
                pet = "Cat"
            else:
                pet = "Dog"

            all_emotions = [
                {'name': CONSTANTS['EMOTION']['UNSETTLED'], 'value': prediction[0]},
                {'name': CONSTANTS['EMOTION']['HAPPY'], 'value': prediction[1]},
                {'name': CONSTANTS['EMOTION']['NEUTRAL'], 'value': prediction[2]},
                {'name': CONSTANTS['EMOTION']['SAD'], 'value': prediction[3]},
                {'name': CONSTANTS['EMOTION']['ANXIOUS'], 'value': prediction[4]},
            ]
            all_emotions.sort(key=lambda x: x['value'], reverse=True)
            all_emotions_result = []
            for each_emotion in all_emotions:
                all_emotions_result.append(
                    each_emotion['name'] + ' (' + str(np.round(each_emotion['value'] * 100)) + '%)')

            predicted_breed = re.sub(r"_", " ", predicted_class, flags=re.IGNORECASE)
            obj_dict = {
                'id': count,
                'pet': pet,
                'breed': "{} ({}%)".format(predicted_breed.title(), np.round(score * 100)),
                'breedTitle': predicted_breed.title(),
                'breedScore': np.round(score * 100),
                'emotion': {
                    'mostLikelyTitle': self.emotion_dict[prediction_],
                    'mostLikelyScore': np.round(np.max(prediction) * 100),
                    'mostLikely': "{} ({}%)".format(self.emotion_dict[prediction_], np.round(np.max(prediction) * 100)),
                    'allEmotions': all_emotions_result
                },
            }

            count += 1

            detection_dicts.append(obj_dict)

        # predicted_img_name = "predicted_image.jpg"
        # image.save(predicted_img_name, "JPEG")
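
        # Illustrative shape of one detection_dicts entry (values invented for
        # illustration; the emotion strings come from CONSTANTS and
        # self.emotion_dict):
        # {'id': 0, 'pet': 'Dog', 'breed': 'Golden Retriever (97.0%)',
        #  'breedTitle': 'Golden Retriever', 'breedScore': 97.0,
        #  'emotion': {'mostLikely': 'happy (88.0%)', ...,
        #              'allEmotions': ['happy (88.0%)', 'neutral (7.0%)', ...]}}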

        return image, detection_dicts

    def close_session(self):
        self.sess.close()
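

# Minimal usage sketch (assumes this class is named YOLO, as in keras-yolo3,
# and that the model/anchor/class files referenced in _defaults are present):
#
#   from PIL import Image
#   yolo = YOLO()
#   annotated, detections = yolo.detect_image(Image.open("pet.jpg"))
#   annotated.save("predicted_image.jpg", "JPEG")
#   yolo.close_session()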