김성연

Refactor file configuration and fix model loader
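This change moves all uploaded assets under web/static: face images are saved to static/img (with their descriptors written to static/img/descs.npy) instead of face_emotion_recognition/img, and uploaded videos go to static/video instead of face_emotion_recognition/data. The Flask app gains a deleteFace/<face_name> route, redirects back to the index after a face upload, and passes the image list from find_face_imgs() to the templates. videoDetector(input_fps, video_name) now loads the dlib shape predictor, the face-recognition ResNet and the Keras emotion model inside the function instead of at module import time, and .gitignore is extended to exclude notebook checkpoints, __pycache__ folders, the data and img folders, and web/static.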

.ipynb_checkpoints/*
.DS_Store
web/__pycache__/*
web/face_emotion_recognition/__pycache__
web/face_emotion_recogntion/data
web/face_emotion_recogntion/img
web/static/*
\ No newline at end of file
......
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
import sys, os
from face_emotion_recognition import face_recognition
import sys
import os
from flask.helpers import url_for
from face_emotion_recognition import face_recognition, video2
from flask import Flask, render_template
from flask.globals import request
from werkzeug.utils import secure_filename
from werkzeug.utils import redirect, secure_filename
def find_face_imgs():
face_imgs = []
id = 1
for img_name in os.listdir('static/img'):
if(img_name.rsplit('.')[1] == 'jpg' or img_name.rsplit('.')[1] == 'png'):
face_imgs.append(
{
'id': id,
'name': img_name.rsplit('.')[0],
'imgUrl': 'img/' + img_name
})
id += 1
return face_imgs
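A side note on find_face_imgs(): the rsplit('.')[1] extension check assumes each file name contains exactly one dot and raises IndexError for names without any. A more defensive variant (just a sketch, not part of this commit, reusing the os import above) could lean on os.path.splitext:

def find_face_imgs_safe():   # hypothetical alternative, not in this commit
    face_imgs = []
    for img_name in sorted(os.listdir('static/img')):
        stem, ext = os.path.splitext(img_name)        # e.g. ('bob', '.jpg'), ('descs', '.npy')
        if ext.lower() in ('.jpg', '.png'):           # skips descs.npy and anything unexpected
            face_imgs.append({'id': len(face_imgs) + 1, 'name': stem, 'imgUrl': 'img/' + img_name})
    return face_imgs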
# Create the Flask application instance
app = Flask(__name__)
@app.route('/', methods=('GET', 'POST')) # URL to access
def index():
if request.method == 'POST':
print(request.form.get('user'))
user = request.form.get('user')
data = {'level' : 50, 'point' : 360, 'exp': 45000}
return render_template('index.html', user = user, data = data)
return render_template('index.html', face_imgs=find_face_imgs())
elif request.method == 'GET':
user = '반원'
data = {'level' : 50, 'point' : 360, 'exp': 45000}
return render_template('index.html', user = user, data=data)
return render_template('index.html', face_imgs=find_face_imgs())
@app.route('/goTest', methods=('GET', 'POST')) # URL to access
def test():
if request.method == 'GET':
return render_template('test.html', face_imgs=find_face_imgs())
@app.route('/uploadFace', methods=('GET', 'POST'))
......@@ -28,17 +48,26 @@ def upload_face():
return render_template('upload.html')
elif request.method == 'POST':
f = request.files.get('file')
f.save("./face_emotion_recognition/img/" + secure_filename(f.filename))
f.save("./static/img/" + secure_filename(f.filename))
face_recognition.face_to_npy()
return 'face image uploaded successfully'
return redirect(url_for('index'))
@app.route('/deleteFace/<string:face_name>')
def delete_face(face_name):
print("request좀 보여줘", face_name)
os.remove("./static/img/" + face_name + '.jpg')
face_recognition.face_to_npy()
return redirect(url_for('index'))
@app.route('/uploadVideo', methods=('GET', 'POST'))
@app.route('/uploadVideo')
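# NOTE: a plain @app.route('/uploadVideo') only allows GET in Flask, so a multipart
# POST from the upload form would get a 405; request.files is only populated on POST.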
def upload_video():
if request.method == 'POST':
f = request.files.get('video')
f.save("./face_emotion_recognition/data/" + secure_filename(f.filename))
return 'video uploaded successfully'
f = request.files.get('video')
f.save("./static/video/" + secure_filename(f.filename))
return 'video uploaded successfully'
if __name__=="__main__":
app.run(debug=True)
if __name__ == "__main__":
app.debug = True
app.run()
......
import dlib
import cv2
import numpy as np
import os, path
import os
import path
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
print(os.getcwd())
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
sp = dlib.shape_predictor(
'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
def find_faces(img):
......@@ -49,7 +51,7 @@ def encode_faces(img, shapes):
def face_to_npy():
img_dict = 'face_emotion_recognition/img/'
img_dict = 'static/img/'
print(img_dict)
img_paths = {}
descs = {}
......@@ -57,7 +59,7 @@ def face_to_npy():
if(img_name.rsplit('.')[1] == 'png' or img_name.rsplit('.')[1] == 'jpg'):
img_paths[img_name.rsplit('.')[0]] = img_dict + img_name
print(img_paths, descs)
for name, img_path in img_paths.items():
img_bgr = cv2.imread(img_path)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
......@@ -69,6 +71,5 @@ def face_to_npy():
else:
os.remove(img_path)
print(descs)
np.save('face_emotion_recognition/img/descs.npy', descs)
\ No newline at end of file
np.save('static/img/descs.npy', descs)
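np.save on a dict stores it as a 0-d object array, which is why the loaders elsewhere in this diff unpack it with [()] after np.load(..., allow_pickle=True). A minimal round-trip sketch:

import numpy as np

descs = np.load('static/img/descs.npy', allow_pickle=True)[()]   # 0-d object array -> dict
for name, saved_desc in descs.items():
    print(name, np.asarray(saved_desc).shape)    # one dlib face descriptor per person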
......
......@@ -9,29 +9,21 @@ import pathlib
import time
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
from tensorflow import keras
import time
start = time.time()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
descs = np.load('img/descs2.npy', allow_pickle=True)[()]
video_path = './data/zoom_1.mp4'
cap=cv2.VideoCapture(video_path)
labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
# labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
# predictor = dlib.shape_predictor(
# 'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
# facerec = dlib.face_recognition_model_v1(
# 'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
# model = load_model(
# 'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
......@@ -39,81 +31,101 @@ def get_key(val):
def convertMillis(millis):
seconds=(millis/1000)%60
minutes=(millis/(1000*60))%60
hours=(millis/(1000*60*60))%24
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
hours = (millis/(1000*60*60)) % 24
return seconds, int(minutes), int(hours)
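For reference, a quick check of convertMillis with a hypothetical timestamp (note it returns seconds first, matching the con_sec, con_min, con_hour unpacking used below):

sec, minute, hour = convertMillis(3725500)       # 3,725,500 ms into the video
# sec = 3725.5 % 60 = 5.5, minute = int(62.09 % 60) = 2, hour = int(1.03 % 24) = 1
print("{0}:{1}:{2}".format(hour, minute, round(sec, 3)))   # -> 1:2:5.5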
#cap = cv2.VideoCapture(0) # camera 0
# Read the video size (frame info)
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (frameWidth, frameHeight)
fps = cap.get((cv2.CAP_PROP_FPS))
_, img_bgr = cap.read() # (800, 1920, 3)
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
prev_time = 0
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
while True:
retval, frameBGR = cap.read() # Read the video one frame at a time
current_time = time.time() - prev_time
if(type(frameBGR) == type(None)):
pass
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > 1.5) :
prev_time = time.time()
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) /255
emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
# emotion = get_key(model.predict_classes(imgarr))
last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
# cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
# print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27 :
break
print(time_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1,5):
cv2.destroyAllWindows()
cv2.waitKey(1)
def videoDetector(input_fps, video_name):
# face & emotion model load
predictor = dlib.shape_predictor(
'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model(
'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
start = time.time()
detector = dlib.get_frontal_face_detector()
# face & emotion detection time dict
descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
for name, saved_desc in descs.items():
face_emotion_dict[name] = {'angry': [], 'fear': [
], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
# Load the video info
video_path = '../static/video/' + video_name + '.mp4'
cap = cv2.VideoCapture(video_path)
_, img_bgr = cap.read() # (800, 1920, 3)
resized_width = 1920
video_size = (resized_width, int(
img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
prev_time = 0
while True:
retval, frameBGR = cap.read() # Read the video one frame at a time
current_time = time.time() - prev_time
if(type(frameBGR) == type(None)):
pass
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > input_fps):
prev_time = time.time()
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(
frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left(
):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
emotion = labels_dict_[
model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown',
'dist': 0.6, 'color': (0, 0, 255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm(
[face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {
'name': name, 'dist': dist, 'color': (255, 255, 255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(
cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append(
"{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(
con_hour, con_min, round(con_sec, 3), emotion))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27:
break
print(face_emotion_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1, 5):
cv2.destroyAllWindows()
cv2.waitKey(1)
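The refactored videoDetector can be driven like the standalone script at the end of this diff (which calls videoDetector(3, 'zoom_1')); a hypothetical sketch from the Flask side, noting that input_fps is actually used as a minimum interval in seconds between processed frames and that the relative model/static paths above must resolve from the working directory:

from face_emotion_recognition import video2

# analyse static/video/zoom_1.mp4, sampling a frame roughly every 1.5 seconds
video2.videoDetector(1.5, 'zoom_1')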
......
......@@ -9,30 +9,35 @@ import pathlib
import time
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
from tensorflow import keras
import time
start = time.time()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
predictor = dlib.shape_predictor(
"./models/shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1(
'models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5')
descs = np.load('img/descs2.npy', allow_pickle=True)[()]
video_path = './data/zoom_1.mp4'
cap=cv2.VideoCapture(video_path)
cap = cv2.VideoCapture(video_path)
# labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
labels_dict_ = {'angry': 0, 'fear': 1, 'happy': 2,
'neutral': 3, 'sad': 4, 'surprise': 5}
time_dict = {'angry': [], 'fear': [], 'happy': [],
'neutral': [], 'sad': [], 'surprise': []}
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
......@@ -40,13 +45,14 @@ def get_key(val):
def convertMillis(millis):
seconds=(millis/1000)%60
minutes=(millis/(1000*60))%60
hours=(millis/(1000*60*60))%24
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
hours = (millis/(1000*60*60)) % 24
return seconds, int(minutes), int(hours)
#cap = cv2.VideoCapture(0) # camera 0
# cap = cv2.VideoCapture(0)  # camera 0
# Read the video size (frame info)
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
......@@ -54,10 +60,11 @@ frame_size = (frameWidth, frameHeight)
fps = cap.get((cv2.CAP_PROP_FPS))
_, img_bgr = cap.read() # (800, 1920, 3)
_, img_bgr = cap.read() # (800, 1920, 3)
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
video_size = (resized_width, int(
img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
prev_time = 0
......@@ -65,7 +72,7 @@ fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
while True:
retval, frameBGR = cap.read() # Read the video one frame at a time
retval, frameBGR = cap.read()  # Read the video one frame at a time
current_time = time.time() - prev_time
if(type(frameBGR) == type(None)):
......@@ -73,48 +80,56 @@ while True:
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > 1.5) :
if (retval is True) and (current_time > 1.5):
prev_time = time.time()
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) /255
img = cv2.resize(frame[face.top():face.bottom(), face.left(
):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
# emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
emotion = get_key(model.predict_classes(imgarr))
last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
last_found = {'name': 'unknown',
'dist': 0.6, 'color': (0, 0, 255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
dist = np.linalg.norm(
[face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
last_found = {'name': name, 'dist': dist,
'color': (255, 255, 255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
# cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
con_sec, con_min, con_hour = convertMillis(
cap.get(cv2.CAP_PROP_POS_MSEC))
time_dict[emotion].append("{0}:{1}:{2}".format(
con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(
con_hour, con_min, round(con_sec, 3), emotion))
# print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27 :
if key == 27:
break
print(time_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1,5):
for i in range(1, 5):
cv2.destroyAllWindows()
cv2.waitKey(1)
......
import dlib
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import math
import os
import pathlib
import time
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
from tensorflow import keras
import time
start = time.time()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
return key
def convertMillis(millis):
seconds=(millis/1000)%60
minutes=(millis/(1000*60))%60
hours=(millis/(1000*60*60))%24
return seconds, int(minutes), int(hours)
def videoDetector(input_fps, video_name):
# face & emotion detection time dict
descs = np.load('./img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
for name, saved_desc in descs.items():
face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
# Load the video info
video_path = './data/' + video_name + '.mp4'
cap=cv2.VideoCapture(video_path)
# Read the video size (frame info)
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (frameWidth, frameHeight)
fps = cap.get((cv2.CAP_PROP_FPS))
print(fps)
_, img_bgr = cap.read() # (800, 1920, 3)
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
prev_time = 0
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
while True:
retval, frameBGR = cap.read() # Read the video one frame at a time
current_time = time.time() - prev_time
if(type(frameBGR) == type(None)):
pass
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > 1.5) :
prev_time = time.time()
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) /255
emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27 :
break
print(face_emotion_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1,5):
cv2.destroyAllWindows()
cv2.waitKey(1)
if __name__ == '__main__':
videoDetector(3, 'zoom_1')
\ No newline at end of file
h1{
color: gray;
}
\ No newline at end of file
<!DOCTYPE html>
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
<link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<title>Flask Face Emotion Recognition App</title>
</head>
<body>
<h1>Main page</h1>
Name: {{user}}
Level: {{data.level}}
Points: {{data.point}}
EXP: {{data.exp}}
<form action="/" method="post">
Next name: <input type="text" name='user'>
<input type="submit" value='Submit'>
</form>
<form action="http://localhost:5000/uploadFace" method="POST"
enctype="multipart/form-data"
>
<input type="file" name="file">
<input type="submit"/>
</form>
<form action="http://localhost:5000/uploadVideo" method="POST"
enctype="multipart/form-data"
>
<input type="file" name='video'>
<input type="submit">
</form>
<div class="container" style="margin-top: 100px">
<h3>Face Emotion Recognition Platform</h3>
<hr>
{% if face_imgs == [] %}
<div class="alert alert-warning" role="alert">
No face images available
</div>
{% else %}
<table class="table">
<thead>
<tr>
<th scope="col ">id</th>
<th scope="col">Name</th>
<th scope="col">Image</th>
<th scope="col">Delete</th>
</tr>
</thead>
<tbody>
{% for face_img in face_imgs %}
<tr>
<th scope="row">{{ face_img.id }}</th>
<td>{{ face_img.name }}</td>
<td><img src={{ url_for('static', filename=face_img.imgUrl) }} width="100" height="120"></td>
<td><a href="/deleteFace/{{ face_img.name }}" class="btn btn-danger">Delete</a></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Face Upload</label>
<input type="file" name="file">
<button type="submit" class="btn btn-outline-primary">Add</button>
<a href="/goTest" type="button" class="btn btn-outline-dark"> 비디오 분석하러 가기 </a>
</div>
</form>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
crossorigin="anonymous"></script>
</body>
</html>
\ No newline at end of file
......
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<title>Flask Face Emotion Recognition App</title>
</head>
<body>
<div class="container" style="margin-top: 100px">
<h3>Face Emotion Recognition Platform</h3>
<hr>
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Video Upload</label>
<input type="file" name="file">
<button type="submit" class="btn btn-outline-primary">Add</button>
</div>
</form>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
crossorigin="anonymous"></script>
</body>
</html>
\ No newline at end of file