김성연

Add weekly report and refactor Flask web code

Showing 24 changed files with 224 additions and 97 deletions
......@@ -2,7 +2,7 @@ import sys
import os
from flask.helpers import url_for
from face_emotion_recognition import face_recognition, video2
from face_emotion_recognition import face_recognition, video4
from flask import Flask, render_template
from flask.globals import request
from werkzeug.utils import redirect, secure_filename
......@@ -39,7 +39,7 @@ def index():
@app.route('/goTest', methods=('GET', 'POST')) # URL to access
def test():
if request.method == 'GET':
return render_template('test.html', face_imgs=find_face_imgs())
return render_template('test.html')
@app.route('/uploadFace', methods=('GET', 'POST'))
......@@ -61,11 +61,19 @@ def delete_face(face_name):
return redirect(url_for('index'))
@app.route('/uploadVideo')
@app.route('/uploadVideo', methods=('GET', 'POST'))
def upload_video():
f = request.files.get('video')
f.save("./static/video/" + secure_filename(f.filename))
return 'video uploaded successfully'
if request.method == 'POST':
f = request.files.get('video')
f.save("./static/video/" + secure_filename(f.filename))
return redirect(url_for('test'))
@app.route('/faceEmotinoRecognition')
def faceEmotinoRecognition():
face_emotion_dict = video4.videoDetector(3, 'record0')
print(face_emotion_dict)
return render_template('result.html', face_emotion_dict=face_emotion_dict, face_imgs=find_face_imgs())
if __name__ == "__main__":
......
This diff could not be displayed because it is too large.
......@@ -23,13 +23,6 @@ import time
# model = load_model(
# 'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
return key
def convertMillis(millis):
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
......@@ -51,7 +44,7 @@ def videoDetector(input_fps, video_name):
detector = dlib.get_frontal_face_detector()
# face & emotion detection time dict
descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
......@@ -129,3 +122,5 @@ def videoDetector(input_fps, video_name):
for i in range(1, 5):
cv2.destroyAllWindows()
cv2.waitKey(1)
return face_emotion_dict
......
......@@ -9,114 +9,99 @@ import pathlib
import time
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
from tensorflow import keras
import time
start = time.time()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
return key
def convertMillis(millis):
seconds=(millis/1000)%60
minutes=(millis/(1000*60))%60
hours=(millis/(1000*60*60))%24
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
hours = (millis/(1000*60*60)) % 24
return seconds, int(minutes), int(hours)
def videoDetector(input_fps, video_name):
def videoDetector(second, video_name):
# face & emotion detection model load
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model(
'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
# face & emotion detection time dict
descs = np.load('./img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
for name, saved_desc in descs.items():
face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
face_emotion_dict[name] = {'angry': [], 'fear': [
], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
# load video info
video_path = './data/' + video_name + '.mp4'
cap=cv2.VideoCapture(video_path)
video_path = 'static/video/' + video_name + '.mp4'
cap = cv2.VideoCapture(video_path)
# read the video size (frame info)
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (frameWidth, frameHeight)
fps = cap.get((cv2.CAP_PROP_FPS))
print(fps)
fps = cap.get(cv2.CAP_PROP_FPS)
multiplier = fps * second
_, img_bgr = cap.read() # (800, 1920, 3)
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
prev_time = 0
frameCount = 0
ret = 1
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
while True:
retval, frameBGR = cap.read() # read the video one frame at a time
current_time = time.time() - prev_time
while ret:
frameId = int(round(cap.get(1))) # get the current frame number
ret, frameBGR = cap.read() # read the video one frame at a time
if(type(frameBGR) == type(None)):
pass
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > 1.5) :
prev_time = time.time()
if (ret is True) and (frameId % multiplier < 1):
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) /255
emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27 :
break
try:
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(
frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left(
):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
emotion = labels_dict_[
model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown',
'dist': 0.6, 'color': (0, 0, 255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm(
[face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {
'name': name, 'dist': dist, 'color': (255, 255, 255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(
cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append(
"{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(
con_hour, con_min, round(con_sec, 3), emotion))
except Exception as e:
print(str(e))
frameCount += 1
print(face_emotion_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1,5):
cv2.destroyAllWindows()
cv2.waitKey(1)
if __name__ == '__main__':
videoDetector(3, 'zoom_1')
\ No newline at end of file
return face_emotion_dict
......
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<title>Flask Face Emotion Recognition App</title>
</head>
<body>
<div class="container" style="margin-top: 100px">
<h3>Face Emotion Recognition Platform</h3>
<hr>
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Video Upload</label>
<input type="file" name="file">
<button type="submit" class="btn btn-outline-primary">Add</button>
</div>
</form>
<video autoplay width="320" height="240" controls>
<source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
</video>
<a href="/faceEmotinoRecognition" class="btn btn-outline-primary">얼굴 감정 인식 분석하기</a>
<table class="table">
<thead>
<tr>
<th scope="col ">name</th>
<th scope="col">happy</th>
<th scope="col">sad</th>
<th scope="col">fear</th>
<th scope="col">angry</th>
<th scope="col">neutral</th>
<th scope="col">surprise</th>
</tr>
</thead>
<tbody>
{% for face_img in face_imgs %}
<tr>
<td scope="row">{{ face_img.name }}</td>
{% if face_emotion_dict[face_img.name].happy %}
<td>
{% for time in face_emotion_dict[face_img.name].happy %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].sad %}
<td>
{% for time in face_emotion_dict[face_img.name].sad %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].fear %}
<td>
{% for time in face_emotion_dict[face_img.name].fear %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].angry %}
<td>
{% for time in face_emotion_dict[face_img.name].angry %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].neutral %}
<td>
{% for time in face_emotion_dict[face_img.name].neutral %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].surprise %}
<td>
{% for time in face_emotion_dict[face_img.name].surprise %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
</tr>
{% endfor %}
</tbody>
</table>
<hr/>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
crossorigin="anonymous"></script>
</body>
</html>
\ No newline at end of file
......@@ -17,13 +17,21 @@
<h3>Face Emotion Recognition Platform</h3>
<hr>
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<form action="http://localhost:5000/uploadVideo" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Video Upload</label>
<input type="file" name="file">
<input type="file" name="video">
<button type="submit" class="btn btn-outline-primary">Add</button>
</div>
</form>
<video autoplay width="320" height="240" controls>
<source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
</video>
<a href="/faceEmotinoRecognition" class="btn btn-outline-primary">얼굴 감정 인식 분석하기</a>
<hr/>
</div>
......