김성연

Refactor file configuration and fix model loader

1 .ipynb_checkpoints/* 1 .ipynb_checkpoints/*
2 .DS_Store 2 .DS_Store
3 +web/__pycache__/*
4 +web/face_emotion_recognition/__pycache__
5 +web/face_emotion_recogntion/data
6 +web/face_emotion_recogntion/img
7 +web/static/*
...\ No newline at end of file ...\ No newline at end of file
......
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
1 -import sys, os 1 +import sys
2 -from face_emotion_recognition import face_recognition 2 +import os
3 +
4 +from flask.helpers import url_for
5 +from face_emotion_recognition import face_recognition, video2
3 from flask import Flask, render_template 6 from flask import Flask, render_template
4 from flask.globals import request 7 from flask.globals import request
5 -from werkzeug.utils import secure_filename 8 +from werkzeug.utils import redirect, secure_filename
9 +
10 +
11 +def find_face_imgs():
12 + face_imgs = []
13 + id = 1
14 + for img_name in os.listdir('static/img'):
15 + if(img_name.rsplit('.')[1] == 'jpg' or img_name.rsplit('.')[1] == 'png'):
16 + face_imgs.append(
17 + {
18 + 'id': id,
19 + 'name': img_name.rsplit('.')[0],
20 + 'imgUrl': 'img/' + img_name
21 + })
22 + id += 1
23 + return face_imgs
6 24
7 25
8 # create the Flask object instance 26 # create the Flask object instance
9 app = Flask(__name__) 27 app = Flask(__name__)
10 28
29 +
11 @app.route('/', methods=('GET', 'POST')) # URL to access 30 @app.route('/', methods=('GET', 'POST')) # URL to access
12 def index(): 31 def index():
13 if request.method == 'POST': 32 if request.method == 'POST':
14 - print(request.form.get('user')) 33 + return render_template('index.html', face_imgs=find_face_imgs())
15 - user = request.form.get('user')
16 - data = {'level' : 50, 'point' : 360, 'exp': 45000}
17 - return render_template('index.html', user = user, data = data)
18 34
19 elif request.method == 'GET': 35 elif request.method == 'GET':
20 - user = '반원' 36 + return render_template('index.html', face_imgs=find_face_imgs())
21 - data = {'level' : 50, 'point' : 360, 'exp': 45000} 37 +
22 - return render_template('index.html', user = user, data=data) 38 +
39 +@app.route('/goTest', methods=('GET', 'POST')) # URL to access
40 +def test():
41 + if request.method == 'GET':
42 + return render_template('test.html', face_imgs=find_face_imgs())
23 43
24 44
25 @app.route('/uploadFace', methods=('GET', 'POST')) 45 @app.route('/uploadFace', methods=('GET', 'POST'))
...@@ -28,17 +48,26 @@ def upload_face(): ...@@ -28,17 +48,26 @@ def upload_face():
28 return render_template('upload.html') 48 return render_template('upload.html')
29 elif request.method == 'POST': 49 elif request.method == 'POST':
30 f = request.files.get('file') 50 f = request.files.get('file')
31 - f.save("./face_emotion_recognition/img/" + secure_filename(f.filename)) 51 + f.save("./static/img/" + secure_filename(f.filename))
52 + face_recognition.face_to_npy()
53 + return redirect(url_for('index'))
54 +
55 +
56 +@app.route('/deleteFace/<string:face_name>')
57 +def delete_face(face_name):
58 + print("show me the request", face_name)
59 + os.remove("./static/img/" + face_name + '.jpg')
32 face_recognition.face_to_npy() 60 face_recognition.face_to_npy()
33 - return 'face image uploaded successfully' 61 + return redirect(url_for('index'))
62 +
34 63
35 -@app.route('/uploadVideo', methods=('GET', 'POST')) 64 +@app.route('/uploadVideo')
36 def upload_video(): 65 def upload_video():
37 - if request.method == 'POST':
38 f = request.files.get('video') 66 f = request.files.get('video')
39 - f.save("./face_emotion_recognition/data/" + secure_filename(f.filename)) 67 + f.save("./static/video/" + secure_filename(f.filename))
40 return 'video uploaded successfully' 68 return 'video uploaded successfully'
41 69
42 70
43 -if __name__=="__main__": 71 +if __name__ == "__main__":
44 - app.run(debug=True) 72 + app.debug = True
73 + app.run()
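
For context, the refactored application boils down to the following minimal sketch: uploaded images live under static/img, find_face_imgs() feeds the templates, and every upload or delete re-encodes the face database before redirecting back to the index. rebuild_face_db() is a hypothetical stand-in for face_recognition.face_to_npy(), IMG_DIR is an assumed constant, and the sketch imports redirect from flask because recent Werkzeug releases no longer re-export it from werkzeug.utils.

import os

from flask import Flask, redirect, render_template, request, url_for
from werkzeug.utils import secure_filename

IMG_DIR = os.path.join('static', 'img')  # assumed upload target, mirroring the diff
app = Flask(__name__)


def rebuild_face_db():
    # Hypothetical stand-in for face_recognition.face_to_npy(), which
    # re-encodes every image under static/img into descs.npy.
    pass


def find_face_imgs():
    # Collect jpg/png files under static/img as dicts the templates can render.
    face_imgs = []
    for img_name in sorted(os.listdir(IMG_DIR)):
        stem, _, ext = img_name.rpartition('.')
        if ext.lower() in ('jpg', 'png'):
            face_imgs.append({'id': len(face_imgs) + 1,
                              'name': stem,
                              'imgUrl': 'img/' + img_name})
    return face_imgs


@app.route('/', methods=('GET', 'POST'))
def index():
    return render_template('index.html', face_imgs=find_face_imgs())


@app.route('/uploadFace', methods=('POST',))
def upload_face():
    f = request.files.get('file')
    if f and f.filename:
        f.save(os.path.join(IMG_DIR, secure_filename(f.filename)))
        rebuild_face_db()
    return redirect(url_for('index'))


@app.route('/deleteFace/<string:face_name>')
def delete_face(face_name):
    os.remove(os.path.join(IMG_DIR, secure_filename(face_name) + '.jpg'))
    rebuild_face_db()
    return redirect(url_for('index'))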
......
1 import dlib 1 import dlib
2 import cv2 2 import cv2
3 import numpy as np 3 import numpy as np
4 -import os, path 4 +import os
5 +import path
5 import matplotlib.pyplot as plt 6 import matplotlib.pyplot as plt
6 import matplotlib.patches as patches 7 import matplotlib.patches as patches
7 import matplotlib.patheffects as path_effects 8 import matplotlib.patheffects as path_effects
8 9
9 10
10 -print(os.getcwd())
11 detector = dlib.get_frontal_face_detector() 11 detector = dlib.get_frontal_face_detector()
12 -sp = dlib.shape_predictor('face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat') 12 +sp = dlib.shape_predictor(
13 -facerec = dlib.face_recognition_model_v1('face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat') 13 + 'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
14 +facerec = dlib.face_recognition_model_v1(
15 + 'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
14 16
15 17
16 def find_faces(img): 18 def find_faces(img):
...@@ -49,7 +51,7 @@ def encode_faces(img, shapes): ...@@ -49,7 +51,7 @@ def encode_faces(img, shapes):
49 51
50 52
51 def face_to_npy(): 53 def face_to_npy():
52 - img_dict = 'face_emotion_recognition/img/' 54 + img_dict = 'static/img/'
53 print(img_dict) 55 print(img_dict)
54 img_paths = {} 56 img_paths = {}
55 descs = {} 57 descs = {}
...@@ -69,6 +71,5 @@ def face_to_npy(): ...@@ -69,6 +71,5 @@ def face_to_npy():
69 else: 71 else:
70 os.remove(img_path) 72 os.remove(img_path)
71 73
72 -
73 print(descs) 74 print(descs)
74 - np.save('face_emotion_recognition/img/descs.npy', descs)
...\ No newline at end of file ...\ No newline at end of file
75 + np.save('static/img/descs.npy', descs)
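
face_to_npy() now writes the name-to-descriptor dictionary to static/img/descs.npy. A small round-trip sketch of that pattern, using dummy descriptors and a throwaway file name (the (1, 128) shape is an assumption for illustration):

import numpy as np

# dummy {name: descriptor} dict; a (1, 128) shape is assumed here for illustration
descs = {'alice': np.random.rand(1, 128), 'bob': np.random.rand(1, 128)}

np.save('descs.npy', descs)                            # the dict is pickled into a 0-d object array
loaded = np.load('descs.npy', allow_pickle=True)[()]   # allow_pickle plus [()] unwrap it back into a dict

assert set(loaded) == {'alice', 'bob'}
assert np.allclose(loaded['alice'], descs['alice'])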
......
...@@ -9,29 +9,21 @@ import pathlib ...@@ -9,29 +9,21 @@ import pathlib
9 import time 9 import time
10 import pandas as pd 10 import pandas as pd
11 import tensorflow as tf 11 import tensorflow as tf
12 -from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img 12 +from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
13 from tensorflow.keras.models import load_model 13 from tensorflow.keras.models import load_model
14 from tensorflow.keras import regularizers 14 from tensorflow.keras import regularizers
15 from tensorflow import keras 15 from tensorflow import keras
16 import time 16 import time
17 17
18 18
19 -start = time.time() 19 +# predictor = dlib.shape_predictor(
20 -detector = dlib.get_frontal_face_detector() 20 +# 'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
21 -predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat") 21 +# facerec = dlib.face_recognition_model_v1(
22 -facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat') 22 +# 'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
23 -model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5') 23 +# model = load_model(
24 +# 'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
24 25
25 -descs = np.load('img/descs2.npy', allow_pickle=True)[()]
26 26
27 -video_path = './data/zoom_1.mp4'
28 -cap=cv2.VideoCapture(video_path)
29 -
30 -
31 -
32 -labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
33 -# labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
34 -time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
35 def get_key(val): 27 def get_key(val):
36 for key, value in labels_dict_.items(): 28 for key, value in labels_dict_.items():
37 if(value == val): 29 if(value == val):
...@@ -39,31 +31,44 @@ def get_key(val): ...@@ -39,31 +31,44 @@ def get_key(val):
39 31
40 32
41 def convertMillis(millis): 33 def convertMillis(millis):
42 - seconds=(millis/1000)%60 34 + seconds = (millis/1000) % 60
43 - minutes=(millis/(1000*60))%60 35 + minutes = (millis/(1000*60)) % 60
44 - hours=(millis/(1000*60*60))%24 36 + hours = (millis/(1000*60*60)) % 24
45 return seconds, int(minutes), int(hours) 37 return seconds, int(minutes), int(hours)
46 38
47 -#cap = cv2.VideoCapture(0) # 0번 카메라
48 39
49 -# 동영상 크기(frame정보)를 읽어옴 40 +def videoDetector(input_fps, video_name):
50 -frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
51 -frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
52 -frame_size = (frameWidth, frameHeight)
53 -fps = cap.get((cv2.CAP_PROP_FPS))
54 41
42 + # face & emotion model load
43 + predictor = dlib.shape_predictor(
44 + 'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
45 + facerec = dlib.face_recognition_model_v1(
46 + 'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
47 + model = load_model(
48 + 'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
55 49
56 -_, img_bgr = cap.read() # (800, 1920, 3) 50 + start = time.time()
57 -padding_size = 0 51 + detector = dlib.get_frontal_face_detector()
58 -resized_width = 1920
59 -video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
60 -timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
61 -prev_time = 0
62 52
63 -fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') 53 + # face & emotion detection time dict
64 -# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size) 54 + descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
65 - 55 + labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
66 -while True: 56 + 3: 'neutral', 4: 'sad', 5: 'surprise'}
57 + face_emotion_dict = {}
58 + for name, saved_desc in descs.items():
59 + face_emotion_dict[name] = {'angry': [], 'fear': [
60 + ], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
61 +
62 + # load video info
63 + video_path = '../static/video/' + video_name + '.mp4'
64 + cap = cv2.VideoCapture(video_path)
65 + _, img_bgr = cap.read() # (800, 1920, 3)
66 + resized_width = 1920
67 + video_size = (resized_width, int(
68 + img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
69 + prev_time = 0
70 +
71 + while True:
67 retval, frameBGR = cap.read() # read the video one frame at a time 72 retval, frameBGR = cap.read() # read the video one frame at a time
68 current_time = time.time() - prev_time 73 current_time = time.time() - prev_time
69 74
...@@ -73,47 +78,54 @@ while True: ...@@ -73,47 +78,54 @@ while True:
73 frameBGR = cv2.resize(frameBGR, video_size) 78 frameBGR = cv2.resize(frameBGR, video_size)
74 frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB) 79 frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
75 80
76 - if (retval is True) and (current_time > 1.5) : 81 + if (retval is True) and (current_time > input_fps):
77 prev_time = time.time() 82 prev_time = time.time()
78 faces = detector(frame, 1) 83 faces = detector(frame, 1)
79 84
80 for (i, face) in enumerate(faces): 85 for (i, face) in enumerate(faces):
81 shape = predictor(frame, face) 86 shape = predictor(frame, face)
82 - face_descriptor = facerec.compute_face_descriptor(frame, shape) 87 + face_descriptor = facerec.compute_face_descriptor(
83 - 88 + frame, shape)
84 - img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
85 - imgarr = np.array(img).reshape(1, 224, 224, 3) /255
86 - emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
87 - # emotion = get_key(model.predict_classes(imgarr))
88 89
90 + img = cv2.resize(frame[face.top():face.bottom(), face.left(
91 + ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
92 + imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
93 + emotion = labels_dict_[
94 + model.predict(imgarr).argmax(axis=-1)[0]]
89 95
90 - last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)} 96 + last_found = {'name': 'unknown',
97 + 'dist': 0.6, 'color': (0, 0, 255)}
91 98
92 for name, saved_desc in descs.items(): 99 for name, saved_desc in descs.items():
93 - dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1) 100 + dist = np.linalg.norm(
101 + [face_descriptor] - saved_desc, axis=1)
94 if dist < last_found['dist']: 102 if dist < last_found['dist']:
95 - last_found = {'name': name, 'dist': dist, 'color': (255,255,255)} 103 + last_found = {
104 + 'name': name, 'dist': dist, 'color': (255, 255, 255)}
96 105
97 - cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2) 106 + cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
98 - cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2) 107 + face.right(), face.bottom()), color=last_found['color'], thickness=2)
99 - # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2) 108 + cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
109 + )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
100 110
101 - con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC)) 111 + con_sec, con_min, con_hour = convertMillis(
102 - time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3))) 112 + cap.get(cv2.CAP_PROP_POS_MSEC))
103 - print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion)) 113 + face_emotion_dict[last_found['name']][emotion].append(
104 - # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec)) 114 + "{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
115 + print("{0}:{1}:{2} {3}".format(
116 + con_hour, con_min, round(con_sec, 3), emotion))
105 117
106 cv2.imshow('frame', frameBGR) 118 cv2.imshow('frame', frameBGR)
107 119
108 key = cv2.waitKey(25) 120 key = cv2.waitKey(25)
109 - if key == 27 : 121 + if key == 27:
110 break 122 break
111 123
112 -print(time_dict) 124 + print(face_emotion_dict)
113 -print("total time : ", time.time() - start) 125 + print("total time : ", time.time() - start)
114 -if cap.isOpened(): 126 + if cap.isOpened():
115 cap.release() 127 cap.release()
116 128
117 -for i in range(1,5): 129 + for i in range(1, 5):
118 cv2.destroyAllWindows() 130 cv2.destroyAllWindows()
119 cv2.waitKey(1) 131 cv2.waitKey(1)
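
The identification step inside videoDetector() reduces to a nearest-neighbour check against the saved descriptors with a 0.6 distance cut-off. A self-contained sketch of that rule, with random vectors standing in for real dlib descriptors:

import numpy as np

def match_face(face_descriptor, descs, threshold=0.6):
    # face_descriptor: length-128 vector; descs: {name: (1, 128) array}
    best = {'name': 'unknown', 'dist': threshold, 'color': (0, 0, 255)}
    for name, saved_desc in descs.items():
        dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)[0]
        if dist < best['dist']:
            best = {'name': name, 'dist': dist, 'color': (255, 255, 255)}
    return best

# toy usage: random vectors stand in for dlib's 128-d face descriptors
descs = {'alice': np.random.rand(1, 128), 'bob': np.random.rand(1, 128)}
print(match_face(np.random.rand(128), descs))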
......
...@@ -9,7 +9,7 @@ import pathlib ...@@ -9,7 +9,7 @@ import pathlib
9 import time 9 import time
10 import pandas as pd 10 import pandas as pd
11 import tensorflow as tf 11 import tensorflow as tf
12 -from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img 12 +from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
13 from tensorflow.keras.models import load_model 13 from tensorflow.keras.models import load_model
14 from tensorflow.keras import regularizers 14 from tensorflow.keras import regularizers
15 from tensorflow import keras 15 from tensorflow import keras
...@@ -18,21 +18,26 @@ import time ...@@ -18,21 +18,26 @@ import time
18 18
19 start = time.time() 19 start = time.time()
20 detector = dlib.get_frontal_face_detector() 20 detector = dlib.get_frontal_face_detector()
21 -predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat") 21 +predictor = dlib.shape_predictor(
22 -facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat') 22 + "./models/shape_predictor_68_face_landmarks.dat")
23 +facerec = dlib.face_recognition_model_v1(
24 + 'models/dlib_face_recognition_resnet_model_v1.dat')
23 model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5') 25 model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5')
24 26
25 27
26 descs = np.load('img/descs2.npy', allow_pickle=True)[()] 28 descs = np.load('img/descs2.npy', allow_pickle=True)[()]
27 29
28 video_path = './data/zoom_1.mp4' 30 video_path = './data/zoom_1.mp4'
29 -cap=cv2.VideoCapture(video_path) 31 +cap = cv2.VideoCapture(video_path)
30 -
31 32
32 33
33 # labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'} 34 # labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
34 -labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5} 35 +labels_dict_ = {'angry': 0, 'fear': 1, 'happy': 2,
35 -time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []} 36 + 'neutral': 3, 'sad': 4, 'surprise': 5}
37 +time_dict = {'angry': [], 'fear': [], 'happy': [],
38 + 'neutral': [], 'sad': [], 'surprise': []}
39 +
40 +
36 def get_key(val): 41 def get_key(val):
37 for key, value in labels_dict_.items(): 42 for key, value in labels_dict_.items():
38 if(value == val): 43 if(value == val):
...@@ -40,12 +45,13 @@ def get_key(val): ...@@ -40,12 +45,13 @@ def get_key(val):
40 45
41 46
42 def convertMillis(millis): 47 def convertMillis(millis):
43 - seconds=(millis/1000)%60 48 + seconds = (millis/1000) % 60
44 - minutes=(millis/(1000*60))%60 49 + minutes = (millis/(1000*60)) % 60
45 - hours=(millis/(1000*60*60))%24 50 + hours = (millis/(1000*60*60)) % 24
46 return seconds, int(minutes), int(hours) 51 return seconds, int(minutes), int(hours)
47 52
48 -#cap = cv2.VideoCapture(0) # camera 0 53 +# cap = cv2.VideoCapture(0) # camera 0
54 +
49 55
50 # read the video size (frame info) 56 # read the video size (frame info)
51 frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 57 frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
...@@ -57,7 +63,8 @@ fps = cap.get((cv2.CAP_PROP_FPS)) ...@@ -57,7 +63,8 @@ fps = cap.get((cv2.CAP_PROP_FPS))
57 _, img_bgr = cap.read() # (800, 1920, 3) 63 _, img_bgr = cap.read() # (800, 1920, 3)
58 padding_size = 0 64 padding_size = 0
59 resized_width = 1920 65 resized_width = 1920
60 -video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1])) 66 +video_size = (resized_width, int(
67 + img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
61 timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)] 68 timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
62 prev_time = 0 69 prev_time = 0
63 70
...@@ -74,7 +81,7 @@ while True: ...@@ -74,7 +81,7 @@ while True:
74 frameBGR = cv2.resize(frameBGR, video_size) 81 frameBGR = cv2.resize(frameBGR, video_size)
75 frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB) 82 frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
76 83
77 - if (retval is True) and (current_time > 1.5) : 84 + if (retval is True) and (current_time > 1.5):
78 prev_time = time.time() 85 prev_time = time.time()
79 faces = detector(frame, 1) 86 faces = detector(frame, 1)
80 87
...@@ -82,32 +89,40 @@ while True: ...@@ -82,32 +89,40 @@ while True:
82 shape = predictor(frame, face) 89 shape = predictor(frame, face)
83 face_descriptor = facerec.compute_face_descriptor(frame, shape) 90 face_descriptor = facerec.compute_face_descriptor(frame, shape)
84 91
85 - img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC) 92 + img = cv2.resize(frame[face.top():face.bottom(), face.left(
86 - imgarr = np.array(img).reshape(1, 224, 224, 3) /255 93 + ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
94 + imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
87 # emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]] 95 # emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
88 emotion = get_key(model.predict_classes(imgarr)) 96 emotion = get_key(model.predict_classes(imgarr))
89 97
90 - 98 + last_found = {'name': 'unknown',
91 - last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)} 99 + 'dist': 0.6, 'color': (0, 0, 255)}
92 100
93 for name, saved_desc in descs.items(): 101 for name, saved_desc in descs.items():
94 - dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1) 102 + dist = np.linalg.norm(
103 + [face_descriptor] - saved_desc, axis=1)
95 if dist < last_found['dist']: 104 if dist < last_found['dist']:
96 - last_found = {'name': name, 'dist': dist, 'color': (255,255,255)} 105 + last_found = {'name': name, 'dist': dist,
106 + 'color': (255, 255, 255)}
97 107
98 - cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2) 108 + cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
99 - cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2) 109 + face.right(), face.bottom()), color=last_found['color'], thickness=2)
110 + cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
111 + )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
100 # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2) 112 # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
101 113
102 - con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC)) 114 + con_sec, con_min, con_hour = convertMillis(
103 - time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3))) 115 + cap.get(cv2.CAP_PROP_POS_MSEC))
104 - print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion)) 116 + time_dict[emotion].append("{0}:{1}:{2}".format(
117 + con_hour, con_min, round(con_sec, 3)))
118 + print("{0}:{1}:{2} {3}".format(
119 + con_hour, con_min, round(con_sec, 3), emotion))
105 # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec)) 120 # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
106 121
107 cv2.imshow('frame', frameBGR) 122 cv2.imshow('frame', frameBGR)
108 123
109 key = cv2.waitKey(25) 124 key = cv2.waitKey(25)
110 - if key == 27 : 125 + if key == 27:
111 break 126 break
112 127
113 print(time_dict) 128 print(time_dict)
...@@ -115,6 +130,6 @@ print("총 시간 : ", time.time() - start) ...@@ -115,6 +130,6 @@ print("총 시간 : ", time.time() - start)
115 if cap.isOpened(): 130 if cap.isOpened():
116 cap.release() 131 cap.release()
117 132
118 -for i in range(1,5): 133 +for i in range(1, 5):
119 cv2.destroyAllWindows() 134 cv2.destroyAllWindows()
120 cv2.waitKey(1) 135 cv2.waitKey(1)
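
Both scripts share convertMillis(), which turns cv2.CAP_PROP_POS_MSEC into a timestamp; note the reversed return order (seconds, minutes, hours) and that only the seconds keep their fractional part. A quick worked example:

def convertMillis(millis):
    seconds = (millis / 1000) % 60
    minutes = (millis / (1000 * 60)) % 60
    hours = (millis / (1000 * 60 * 60)) % 24
    return seconds, int(minutes), int(hours)

# 3,725,500 ms into a video is 1 h 2 min 5.5 s
con_sec, con_min, con_hour = convertMillis(3725500)
print("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))  # -> 1:2:5.5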
......
1 +import dlib
2 +import cv2
3 +import numpy as np
4 +import matplotlib.pyplot as plt
5 +import tensorflow as tf
6 +import math
7 +import os
8 +import pathlib
9 +import time
10 +import pandas as pd
11 +import tensorflow as tf
12 +from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
13 +from tensorflow.keras.models import load_model
14 +from tensorflow.keras import regularizers
15 +from tensorflow import keras
16 +import time
17 +
18 +
19 +start = time.time()
20 +detector = dlib.get_frontal_face_detector()
21 +predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
22 +facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
23 +model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
24 +
25 +
26 +def get_key(val):
27 + for key, value in labels_dict_.items():
28 + if(value == val):
29 + return key
30 +
31 +
32 +def convertMillis(millis):
33 + seconds=(millis/1000)%60
34 + minutes=(millis/(1000*60))%60
35 + hours=(millis/(1000*60*60))%24
36 + return seconds, int(minutes), int(hours)
37 +
38 +
39 +def videoDetector(input_fps, video_name):
40 +
41 + # face & emotion detection time dict
42 + descs = np.load('./img/descs.npy', allow_pickle=True)[()]
43 + labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
44 + face_emotion_dict = {}
45 + for name, saved_desc in descs.items():
46 + face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
47 +
48 +
49 + # load video info
50 + video_path = './data/' + video_name + '.mp4'
51 + cap=cv2.VideoCapture(video_path)
52 +
53 + # read the video size (frame info)
54 + frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
55 + frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
56 + frame_size = (frameWidth, frameHeight)
57 + fps = cap.get((cv2.CAP_PROP_FPS))
58 + print(fps)
59 +
60 +
61 + _, img_bgr = cap.read() # (800, 1920, 3)
62 + padding_size = 0
63 + resized_width = 1920
64 + video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
65 + timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
66 + prev_time = 0
67 +
68 + fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
69 + while True:
70 + retval, frameBGR = cap.read() # read the video one frame at a time
71 + current_time = time.time() - prev_time
72 +
73 + if(type(frameBGR) == type(None)):
74 + pass
75 + else:
76 + frameBGR = cv2.resize(frameBGR, video_size)
77 + frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
78 +
79 + if (retval is True) and (current_time > 1.5) :
80 + prev_time = time.time()
81 + faces = detector(frame, 1)
82 +
83 + for (i, face) in enumerate(faces):
84 + shape = predictor(frame, face)
85 + face_descriptor = facerec.compute_face_descriptor(frame, shape)
86 +
87 + img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
88 + imgarr = np.array(img).reshape(1, 224, 224, 3) /255
89 + emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
90 +
91 + last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
92 +
93 + for name, saved_desc in descs.items():
94 + dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
95 + if dist < last_found['dist']:
96 + last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
97 +
98 + cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
99 + cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
100 +
101 + con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
102 + face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
103 + print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
104 +
105 + cv2.imshow('frame', frameBGR)
106 +
107 + key = cv2.waitKey(25)
108 + if key == 27 :
109 + break
110 +
111 + print(face_emotion_dict)
112 + print("total time : ", time.time() - start)
113 + if cap.isOpened():
114 + cap.release()
115 +
116 + for i in range(1,5):
117 + cv2.destroyAllWindows()
118 + cv2.waitKey(1)
119 +
120 +
121 +if __name__ == '__main__':
122 + videoDetector(3, 'zoom_1')
...\ No newline at end of file ...\ No newline at end of file
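
The per-face emotion step in videoDetector() crops the detection, resizes it to 224x224, scales it to [0, 1], and maps the model's argmax index back through labels_dict_. The sketch below mirrors that preprocessing; the untrained two-layer Keras model is only a stand-in for the MobileNet checkpoint, so its prediction is meaningless.

import numpy as np
import tensorflow as tf

labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
                3: 'neutral', 4: 'sad', 5: 'surprise'}

# stand-in for load_model('checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
model = tf.keras.Sequential([
    tf.keras.Input(shape=(224, 224, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='softmax'),
])

face_crop = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)  # dummy face crop
imgarr = np.array(face_crop).reshape(1, 224, 224, 3) / 255            # same scaling as the script
emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
print(emotion)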
1 -h1{
2 - color: gray;
3 -}
...\ No newline at end of file ...\ No newline at end of file
1 -<!DOCTYPE html> 1 +<!doctype html>
2 <html lang="en"> 2 <html lang="en">
3 +
3 <head> 4 <head>
4 <meta charset="UTF-8"> 5 <meta charset="UTF-8">
5 - <title>Title</title> 6 + <meta name="viewport"
6 - <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}"> 7 + content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
8 + <meta http-equiv="X-UA-Compatible" content="ie=edge">
9 + <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
10 + integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
11 + <title>Flask Face Emotion Recognition App</title>
7 </head> 12 </head>
13 +
8 <body> 14 <body>
9 - <h1>Main page</h1>
10 - Name: {{user}}
11 - Level: {{data.level}}
12 - Points: {{data.point}}
13 - Experience: {{data.exp}}
14 -
15 - <form action="/" method="post">
16 - Next name: <input type="text" name='user'>
17 - <input type="submit" value='Submit'>
18 - </form>
19 15
16 + <div class="container" style="margin-top: 100px">
17 + <h3>Face Emotion Recognition Platform</h3>
18 + <hr>
19 +
20 + {% if face_imgs == [] %}
21 + <div class="alert alert-warning" role="alert">
22 + No face images available
23 + </div>
24 + {% else %}
25 + <table class="table">
26 + <thead>
27 + <tr>
28 + <th scope="col ">id</th>
29 + <th scope="col">Name</th>
30 + <th scope="col">Image</th>
31 + <th scope="col">Delete</th>
32 + </tr>
33 + </thead>
34 + <tbody>
35 +
36 + {% for face_img in face_imgs %}
37 + <tr>
38 + <th scope="row">{{ face_img.id }}</th>
39 + <td>{{ face_img.name }}</td>
40 + <td><img src={{ url_for('static', filename=face_img.imgUrl) }} width="100" height="120"></td>
41 + <td><a href="/deleteFace/{{ face_img.name }}" class="btn btn-danger">Delete</a></td>
42 + </tr>
43 + {% endfor %}
44 + </tbody>
45 + </table>
46 + {% endif %}
20 47
21 - <form action="http://localhost:5000/uploadFace" method="POST" 48 +
22 - enctype="multipart/form-data" 49 + <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
23 - > 50 + <div class="form-group">
51 + <label for="title" class="text-uppercase">Face Upload</label>
24 <input type="file" name="file"> 52 <input type="file" name="file">
25 - <input type="submit"/> 53 + <button type="submit" class="btn btn-outline-primary">Add</button>
54 + <a href="/goTest" type="button" class="btn btn-outline-dark"> Go to video analysis </a>
55 + </div>
26 </form> 56 </form>
57 + </div>
27 58
28 - <form action="http://localhost:5000/uploadVideo" method="POST" 59 +
29 - enctype="multipart/form-data" 60 + <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
30 - > 61 + integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
31 - <input type="file" name='video'> 62 + crossorigin="anonymous"></script>
32 - <input type="submit"> 63 + <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
33 - </form> 64 + integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
65 + crossorigin="anonymous"></script>
66 + <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
67 + integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
68 + crossorigin="anonymous"></script>
34 </body> 69 </body>
70 +
35 </html> 71 </html>
...\ No newline at end of file ...\ No newline at end of file
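
The rewritten index.html loops over face_imgs and resolves each thumbnail through url_for('static', ...). A tiny sanity check of that loop, rendered with render_template_string and a hand-built face_imgs list:

from flask import Flask, render_template_string

app = Flask(__name__)

TEMPLATE = (
    "{% for face_img in face_imgs %}"
    "{{ face_img.id }}: {{ face_img.name }} -> {{ url_for('static', filename=face_img.imgUrl) }}\n"
    "{% endfor %}"
)

face_imgs = [{'id': 1, 'name': 'alice', 'imgUrl': 'img/alice.jpg'}]

with app.test_request_context():  # url_for needs a request (or server) context
    print(render_template_string(TEMPLATE, face_imgs=face_imgs))
    # -> 1: alice -> /static/img/alice.jpg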
......
1 +<!doctype html>
2 +<html lang="en">
3 +
4 +<head>
5 + <meta charset="UTF-8">
6 + <meta name="viewport"
7 + content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
8 + <meta http-equiv="X-UA-Compatible" content="ie=edge">
9 + <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
10 + integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
11 + <title>Flask Face Emotion Recognition App</title>
12 +</head>
13 +
14 +<body>
15 +
16 + <div class="container" style="margin-top: 100px">
17 + <h3>Face Emotion Recognition Platform</h3>
18 + <hr>
19 +
20 + <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
21 + <div class="form-group">
22 + <label for="title" class="text-uppercase">Video Upload</label>
23 + <input type="file" name="file">
24 + <button type="submit" class="btn btn-outline-primary">Add</button>
25 + </div>
26 + </form>
27 + </div>
28 +
29 +
30 + <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
31 + integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
32 + crossorigin="anonymous"></script>
33 + <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
34 + integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
35 + crossorigin="anonymous"></script>
36 + <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
37 + integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
38 + crossorigin="anonymous"></script>
39 +</body>
40 +
41 +</html>
...\ No newline at end of file ...\ No newline at end of file
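
test.html posts its video file to /uploadFace with the field name "file", while the Python side's upload_video() reads request.files.get('video') on a GET-only /uploadVideo route. A hypothetical handler matching the form as written might look like the sketch below; it is not part of this commit, and the hand-off to video2.videoDetector is only indicated in a comment.

import os

from flask import Flask, request
from werkzeug.utils import secure_filename

app = Flask(__name__)
VIDEO_DIR = os.path.join('static', 'video')  # assumed upload target


@app.route('/uploadVideo', methods=('POST',))
def upload_video():
    f = request.files.get('file')  # matches the form field used in test.html
    if f and f.filename:
        filename = secure_filename(f.filename)
        f.save(os.path.join(VIDEO_DIR, filename))
        # video2.videoDetector(3, os.path.splitext(filename)[0])  # assumed hand-off to the analyzer
    return 'video uploaded successfully'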