김성연

Refactor file configuration and fix model loader

 .ipynb_checkpoints/*
 .DS_Store
+web/__pycache__/*
+web/face_emotion_recognition/__pycache__
+web/face_emotion_recogntion/data
+web/face_emotion_recogntion/img
+web/static/*
\ No newline at end of file
......
This diff could not be displayed because it is too large.
-import sys, os
-from face_emotion_recognition import face_recognition
+import sys
+import os
+
+from flask.helpers import url_for
+from face_emotion_recognition import face_recognition, video2
 from flask import Flask, render_template
 from flask.globals import request
-from werkzeug.utils import secure_filename
+from werkzeug.utils import redirect, secure_filename
+
+
+def find_face_imgs():
+    face_imgs = []
+    id = 1
+    for img_name in os.listdir('static/img'):
+        if(img_name.rsplit('.')[1] == 'jpg' or img_name.rsplit('.')[1] == 'png'):
+            face_imgs.append(
+                {
+                    'id': id,
+                    'name': img_name.rsplit('.')[0],
+                    'imgUrl': 'img/' + img_name
+                })
+            id += 1
+    return face_imgs
 
 
 # Flask 객체 인스턴스 생성
 app = Flask(__name__)
 
+
 @app.route('/', methods=('GET', 'POST'))  # 접속하는 url
 def index():
     if request.method == 'POST':
-        print(request.form.get('user'))
-        user = request.form.get('user')
-        data = {'level' : 50, 'point' : 360, 'exp': 45000}
-        return render_template('index.html', user = user, data = data)
-
+        return render_template('index.html', face_imgs=find_face_imgs())
+
     elif request.method == 'GET':
-        user = '반원'
-        data = {'level' : 50, 'point' : 360, 'exp': 45000}
-        return render_template('index.html', user = user, data=data)
+        return render_template('index.html', face_imgs=find_face_imgs())
+
+
+@app.route('/goTest', methods=('GET', 'POST'))  # 접속하는 url
+def test():
+    if request.method == 'GET':
+        return render_template('test.html', face_imgs=find_face_imgs())
 
 
 @app.route('/uploadFace', methods=('GET', 'POST'))
@@ -28,17 +48,26 @@ def upload_face():
         return render_template('upload.html')
     elif request.method == 'POST':
         f = request.files.get('file')
-        f.save("./face_emotion_recognition/img/" + secure_filename(f.filename))
+        f.save("./static/img/" + secure_filename(f.filename))
         face_recognition.face_to_npy()
-        return 'face image uploaded successfully'
+        return redirect(url_for('index'))
+
+
+@app.route('/deleteFace/<string:face_name>')
+def delete_face(face_name):
+    print("request좀 보여줘", face_name)
+    os.remove("./static/img/" + face_name + '.jpg')
+    face_recognition.face_to_npy()
+    return redirect(url_for('index'))
 
-@app.route('/uploadVideo', methods=('GET', 'POST'))
+
+@app.route('/uploadVideo')
 def upload_video():
-    if request.method == 'POST':
-        f = request.files.get('video')
-        f.save("./face_emotion_recognition/data/" + secure_filename(f.filename))
-        return 'video uploaded successfully'
-
+    f = request.files.get('video')
+    f.save("./static/video/" + secure_filename(f.filename))
+    return 'video uploaded successfully'
+
 
-if __name__=="__main__":
-    app.run(debug=True)
+if __name__ == "__main__":
+    app.debug = True
+    app.run()
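
With the routes above in place, a quick way to exercise them from Python is the requests library. This is only an illustrative sketch: it assumes the app is running locally on port 5000 (the address the templates already post to) and that a file named sample.jpg exists next to the script.

import requests

# Upload a face image; upload_face() stores it under ./static/img/ and
# rebuilds the descriptor file via face_recognition.face_to_npy().
with open('sample.jpg', 'rb') as fp:
    requests.post('http://localhost:5000/uploadFace', files={'file': fp})

# The index page now lists the uploaded face.
print(requests.get('http://localhost:5000/').status_code)

# Remove it again; delete_face() takes the file name without its extension.
requests.get('http://localhost:5000/deleteFace/sample')
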
......
 import dlib
 import cv2
 import numpy as np
-import os, path
+import os
+import path
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
 import matplotlib.patheffects as path_effects
 
 
-print(os.getcwd())
 detector = dlib.get_frontal_face_detector()
-sp = dlib.shape_predictor('face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
-facerec = dlib.face_recognition_model_v1('face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
+sp = dlib.shape_predictor(
+    'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
+facerec = dlib.face_recognition_model_v1(
+    'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
 
 
 def find_faces(img):
@@ -49,7 +51,7 @@ def encode_faces(img, shapes):
 
 
 def face_to_npy():
-    img_dict = 'face_emotion_recognition/img/'
+    img_dict = 'static/img/'
     print(img_dict)
     img_paths = {}
     descs = {}
@@ -57,7 +59,7 @@ def face_to_npy():
         if(img_name.rsplit('.')[1] == 'png' or img_name.rsplit('.')[1] == 'jpg'):
            img_paths[img_name.rsplit('.')[0]] = img_dict + img_name
     print(img_paths, descs)
-
+
     for name, img_path in img_paths.items():
         img_bgr = cv2.imread(img_path)
         img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
@@ -69,6 +71,5 @@ def face_to_npy():
         else:
             os.remove(img_path)
 
-
     print(descs)
-    np.save('face_emotion_recognition/img/descs.npy', descs)
\ No newline at end of file
+    np.save('static/img/descs.npy', descs)
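
For reference, a minimal sketch of how the descs.npy file written by face_to_npy() can be consumed; it mirrors the matching loop in the video scripts below and assumes the file has already been generated under static/img/ and that face_descriptor is a 128-dimensional descriptor from dlib's compute_face_descriptor().

import numpy as np

# Load the saved name -> descriptor mapping (a pickled dict inside the .npy).
descs = np.load('static/img/descs.npy', allow_pickle=True)[()]

def closest_face(face_descriptor, threshold=0.6):
    # Return the registered name whose descriptor is nearest to the query,
    # or 'unknown' if nothing falls under the distance threshold.
    best_name, best_dist = 'unknown', threshold
    for name, saved_desc in descs.items():
        dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
        if dist < best_dist:
            best_name, best_dist = name, dist
    return best_name
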
......
@@ -9,29 +9,21 @@ import pathlib
 import time
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
+from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
 from tensorflow.keras.models import load_model
 from tensorflow.keras import regularizers
 from tensorflow import keras
 import time
 
-
-start = time.time()
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
-facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
-model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
-
-descs = np.load('img/descs2.npy', allow_pickle=True)[()]
-
-video_path = './data/zoom_1.mp4'
-cap=cv2.VideoCapture(video_path)
-
-
-
-labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
-# labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
-time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
+
+# predictor = dlib.shape_predictor(
+#     'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
+# facerec = dlib.face_recognition_model_v1(
+#     'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
+# model = load_model(
+#     'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
+
+
 def get_key(val):
     for key, value in labels_dict_.items():
         if(value == val):
@@ -39,81 +31,101 @@ def get_key(val):
 
 
 def convertMillis(millis):
-    seconds=(millis/1000)%60
-    minutes=(millis/(1000*60))%60
-    hours=(millis/(1000*60*60))%24
+    seconds = (millis/1000) % 60
+    minutes = (millis/(1000*60)) % 60
+    hours = (millis/(1000*60*60)) % 24
     return seconds, int(minutes), int(hours)
 
-#cap = cv2.VideoCapture(0) # 0번 카메라
-
-# 동영상 크기(frame정보)를 읽어옴
-frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-frame_size = (frameWidth, frameHeight)
-fps = cap.get((cv2.CAP_PROP_FPS))
-
-
-_, img_bgr = cap.read() # (800, 1920, 3)
-padding_size = 0
-resized_width = 1920
-video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
-timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
-prev_time = 0
-
-fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
-# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
-
-while True:
-    retval, frameBGR = cap.read() # 영상을 한 frame씩 읽어오기
-    current_time = time.time() - prev_time
-
-    if(type(frameBGR) == type(None)):
-        pass
-    else:
-        frameBGR = cv2.resize(frameBGR, video_size)
-        frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
-
-        if (retval is True) and (current_time > 1.5) :
-            prev_time = time.time()
-            faces = detector(frame, 1)
-
-            for (i, face) in enumerate(faces):
-                shape = predictor(frame, face)
-                face_descriptor = facerec.compute_face_descriptor(frame, shape)
-
-                img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
-                imgarr = np.array(img).reshape(1, 224, 224, 3) /255
-                emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
-                # emotion = get_key(model.predict_classes(imgarr))
-
-
-                last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
-
-                for name, saved_desc in descs.items():
-                    dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
-                    if dist < last_found['dist']:
-                        last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
-
-                cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
-                cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
-                # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
-
-                con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
-                time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
-                print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
-                # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
-
-        cv2.imshow('frame', frameBGR)
-
-        key = cv2.waitKey(25)
-        if key == 27 :
-            break
-
-print(time_dict)
-print("총 시간 : ", time.time() - start)
-if cap.isOpened():
-    cap.release()
-
-for i in range(1,5):
-    cv2.destroyAllWindows()
-    cv2.waitKey(1)
+
+def videoDetector(input_fps, video_name):
+
+    # face & emotion model load
+    predictor = dlib.shape_predictor(
+        'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
+    facerec = dlib.face_recognition_model_v1(
+        'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
+    model = load_model(
+        'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
+
+    start = time.time()
+    detector = dlib.get_frontal_face_detector()
+
+    # face & emotion detection time dict
+    descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
+    labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
+                    3: 'neutral', 4: 'sad', 5: 'surprise'}
+    face_emotion_dict = {}
+    for name, saved_desc in descs.items():
+        face_emotion_dict[name] = {'angry': [], 'fear': [
        ], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
+
+    # video 정보 불러오기
+    video_path = '../static/video/' + video_name + '.mp4'
+    cap = cv2.VideoCapture(video_path)
+    _, img_bgr = cap.read()  # (800, 1920, 3)
+    resized_width = 1920
+    video_size = (resized_width, int(
+        img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
+    prev_time = 0
+
+    while True:
+        retval, frameBGR = cap.read()  # 영상을 한 frame씩 읽어오기
+        current_time = time.time() - prev_time
+
+        if(type(frameBGR) == type(None)):
+            pass
+        else:
+            frameBGR = cv2.resize(frameBGR, video_size)
+            frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
+
+            if (retval is True) and (current_time > input_fps):
+                prev_time = time.time()
+                faces = detector(frame, 1)
+
+                for (i, face) in enumerate(faces):
+                    shape = predictor(frame, face)
+                    face_descriptor = facerec.compute_face_descriptor(
+                        frame, shape)
+
+                    img = cv2.resize(frame[face.top():face.bottom(), face.left(
+                    ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
+                    imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
+                    emotion = labels_dict_[
+                        model.predict(imgarr).argmax(axis=-1)[0]]
+
+                    last_found = {'name': 'unknown',
+                                  'dist': 0.6, 'color': (0, 0, 255)}
+
+                    for name, saved_desc in descs.items():
+                        dist = np.linalg.norm(
+                            [face_descriptor] - saved_desc, axis=1)
+                        if dist < last_found['dist']:
+                            last_found = {
+                                'name': name, 'dist': dist, 'color': (255, 255, 255)}
+
+                    cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
+                        face.right(), face.bottom()), color=last_found['color'], thickness=2)
+                    cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
+                    )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
+
+                    con_sec, con_min, con_hour = convertMillis(
+                        cap.get(cv2.CAP_PROP_POS_MSEC))
+                    face_emotion_dict[last_found['name']][emotion].append(
+                        "{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
+                    print("{0}:{1}:{2} {3}".format(
+                        con_hour, con_min, round(con_sec, 3), emotion))
+
+            cv2.imshow('frame', frameBGR)
+
+            key = cv2.waitKey(25)
+            if key == 27:
+                break
+
+    print(face_emotion_dict)
+    print("총 시간 : ", time.time() - start)
+    if cap.isOpened():
+        cap.release()
+
+    for i in range(1, 5):
+        cv2.destroyAllWindows()
+        cv2.waitKey(1)
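
The Flask app above already imports video2 but does not yet show the call site; a hypothetical wiring (an assumption, not part of this commit) would be to trigger videoDetector() after a clip has been saved by the /uploadVideo route, for example:

from face_emotion_recognition import video2

# Hypothetical call, assuming zoom_1.mp4 was uploaded to ./static/video/:
# sample a frame for detection at most once every 1.5 s of wall-clock time.
video2.videoDetector(1.5, 'zoom_1')
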
......
@@ -9,30 +9,35 @@ import pathlib
 import time
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
+from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
 from tensorflow.keras.models import load_model
 from tensorflow.keras import regularizers
 from tensorflow import keras
 import time
 
-
+
 start = time.time()
 detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
-facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
+predictor = dlib.shape_predictor(
+    "./models/shape_predictor_68_face_landmarks.dat")
+facerec = dlib.face_recognition_model_v1(
+    'models/dlib_face_recognition_resnet_model_v1.dat')
 model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5')
-
-
+
+
 descs = np.load('img/descs2.npy', allow_pickle=True)[()]
-
+
 video_path = './data/zoom_1.mp4'
-cap=cv2.VideoCapture(video_path)
-
+cap = cv2.VideoCapture(video_path)
 
 
 # labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
-labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
-time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
+labels_dict_ = {'angry': 0, 'fear': 1, 'happy': 2,
+                'neutral': 3, 'sad': 4, 'surprise': 5}
+time_dict = {'angry': [], 'fear': [], 'happy': [],
+             'neutral': [], 'sad': [], 'surprise': []}
+
+
 def get_key(val):
     for key, value in labels_dict_.items():
         if(value == val):
@@ -40,13 +45,14 @@ def get_key(val):
 
 
 def convertMillis(millis):
-    seconds=(millis/1000)%60
-    minutes=(millis/(1000*60))%60
-    hours=(millis/(1000*60*60))%24
+    seconds = (millis/1000) % 60
+    minutes = (millis/(1000*60)) % 60
+    hours = (millis/(1000*60*60)) % 24
     return seconds, int(minutes), int(hours)
 
-#cap = cv2.VideoCapture(0) # 0번 카메라
-
+# cap = cv2.VideoCapture(0) # 0번 카메라
+
+
 # 동영상 크기(frame정보)를 읽어옴
 frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@@ -54,10 +60,11 @@ frame_size = (frameWidth, frameHeight)
 fps = cap.get((cv2.CAP_PROP_FPS))
 
 
-_, img_bgr = cap.read() # (800, 1920, 3)
+_, img_bgr = cap.read()  # (800, 1920, 3)
 padding_size = 0
 resized_width = 1920
-video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
+video_size = (resized_width, int(
+    img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
 timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
 prev_time = 0
 
@@ -65,7 +72,7 @@ fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
 # out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
 
 while True:
-    retval, frameBGR = cap.read() # 영상을 한 frame씩 읽어오기
+    retval, frameBGR = cap.read()  # 영상을 한 frame씩 읽어오기
     current_time = time.time() - prev_time
 
     if(type(frameBGR) == type(None)):
@@ -73,48 +80,56 @@ while True:
     else:
         frameBGR = cv2.resize(frameBGR, video_size)
        frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
-
-        if (retval is True) and (current_time > 1.5) :
+
+        if (retval is True) and (current_time > 1.5):
             prev_time = time.time()
             faces = detector(frame, 1)
-
+
             for (i, face) in enumerate(faces):
                 shape = predictor(frame, face)
                 face_descriptor = facerec.compute_face_descriptor(frame, shape)
-
-                img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
-                imgarr = np.array(img).reshape(1, 224, 224, 3) /255
+
+                img = cv2.resize(frame[face.top():face.bottom(), face.left(
+                ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
+                imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
                 # emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
                 emotion = get_key(model.predict_classes(imgarr))
 
-
-                last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
-
+                last_found = {'name': 'unknown',
+                              'dist': 0.6, 'color': (0, 0, 255)}
+
                 for name, saved_desc in descs.items():
-                    dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
+                    dist = np.linalg.norm(
+                        [face_descriptor] - saved_desc, axis=1)
                     if dist < last_found['dist']:
-                        last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
-
-                cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
-                cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
+                        last_found = {'name': name, 'dist': dist,
                                      'color': (255, 255, 255)}
+
+                cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
+                    face.right(), face.bottom()), color=last_found['color'], thickness=2)
+                cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
+                )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
                 # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
-
-                con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
-                time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
-                print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
+
+                con_sec, con_min, con_hour = convertMillis(
+                    cap.get(cv2.CAP_PROP_POS_MSEC))
+                time_dict[emotion].append("{0}:{1}:{2}".format(
+                    con_hour, con_min, round(con_sec, 3)))
+                print("{0}:{1}:{2} {3}".format(
+                    con_hour, con_min, round(con_sec, 3), emotion))
                 # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
 
         cv2.imshow('frame', frameBGR)
 
         key = cv2.waitKey(25)
-        if key == 27 :
+        if key == 27:
             break
 
-print(time_dict)
+print(time_dict)
 print("총 시간 : ", time.time() - start)
 if cap.isOpened():
     cap.release()
 
-for i in range(1,5):
+for i in range(1, 5):
     cv2.destroyAllWindows()
     cv2.waitKey(1)
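
Since this script classifies with model.predict_classes() and then calls get_key(), here is a small worked example of the reverse lookup (illustrative only, not part of the commit):

labels_dict_ = {'angry': 0, 'fear': 1, 'happy': 2,
                'neutral': 3, 'sad': 4, 'surprise': 5}

def get_key(val):
    # Reverse lookup: map a predicted class index back to its emotion label.
    for key, value in labels_dict_.items():
        if(value == val):
            return key

print(get_key(2))  # -> happy
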
......
+import dlib
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+import tensorflow as tf
+import math
+import os
+import pathlib
+import time
+import pandas as pd
+import tensorflow as tf
+from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
+from tensorflow.keras.models import load_model
+from tensorflow.keras import regularizers
+from tensorflow import keras
+import time
+
+
+start = time.time()
+detector = dlib.get_frontal_face_detector()
+predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
+facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
+model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
+
+
+def get_key(val):
+    for key, value in labels_dict_.items():
+        if(value == val):
+            return key
+
+
+def convertMillis(millis):
+    seconds=(millis/1000)%60
+    minutes=(millis/(1000*60))%60
+    hours=(millis/(1000*60*60))%24
+    return seconds, int(minutes), int(hours)
+
+
+def videoDetector(input_fps, video_name):
+
+    # face & emotion detection time dict
+    descs = np.load('./img/descs.npy', allow_pickle=True)[()]
+    labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
+    face_emotion_dict = {}
+    for name, saved_desc in descs.items():
+        face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
+
+
+    # video 정보 불러오기
+    video_path = './data/' + video_name + '.mp4'
+    cap=cv2.VideoCapture(video_path)
+
+    # 동영상 크기(frame정보)를 읽어옴
+    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    frame_size = (frameWidth, frameHeight)
+    fps = cap.get((cv2.CAP_PROP_FPS))
+    print(fps)
+
+
+    _, img_bgr = cap.read() # (800, 1920, 3)
+    padding_size = 0
+    resized_width = 1920
+    video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
+    timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
+    prev_time = 0
+
+    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
+    while True:
+        retval, frameBGR = cap.read() # 영상을 한 frame씩 읽어오기
+        current_time = time.time() - prev_time
+
+        if(type(frameBGR) == type(None)):
+            pass
+        else:
+            frameBGR = cv2.resize(frameBGR, video_size)
+            frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
+
+            if (retval is True) and (current_time > 1.5) :
+                prev_time = time.time()
+                faces = detector(frame, 1)
+
+                for (i, face) in enumerate(faces):
+                    shape = predictor(frame, face)
+                    face_descriptor = facerec.compute_face_descriptor(frame, shape)
+
+                    img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
+                    imgarr = np.array(img).reshape(1, 224, 224, 3) /255
+                    emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
+
+                    last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
+
+                    for name, saved_desc in descs.items():
+                        dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
+                        if dist < last_found['dist']:
+                            last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
+
+                    cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
+                    cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
+
+                    con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
+                    face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
+                    print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
+
+            cv2.imshow('frame', frameBGR)
+
+            key = cv2.waitKey(25)
+            if key == 27 :
+                break
+
+    print(face_emotion_dict)
+    print("총 시간 : ", time.time() - start)
+    if cap.isOpened():
+        cap.release()
+
+    for i in range(1,5):
+        cv2.destroyAllWindows()
+        cv2.waitKey(1)
+
+
+if __name__ == '__main__':
+    videoDetector(3, 'zoom_1')
\ No newline at end of file
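
A short worked example of convertMillis() as used in this script (illustrative only): the function takes the cv2.CAP_PROP_POS_MSEC position in milliseconds and returns (seconds, minutes, hours), which the caller unpacks as con_sec, con_min, con_hour.

def convertMillis(millis):
    seconds = (millis/1000) % 60
    minutes = (millis/(1000*60)) % 60
    hours = (millis/(1000*60*60)) % 24
    return seconds, int(minutes), int(hours)

# 90500 ms into the video is 1 minute 30.5 seconds:
con_sec, con_min, con_hour = convertMillis(90500)
print("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))  # -> 0:1:30.5
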
-h1{
-    color: gray;
-}
\ No newline at end of file
-<!DOCTYPE html>
+<!doctype html>
 <html lang="en">
+
 <head>
     <meta charset="UTF-8">
-    <title>Title</title>
-    <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
+    <meta name="viewport"
+        content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
+    <meta http-equiv="X-UA-Compatible" content="ie=edge">
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
+        integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
+    <title>Flask Face Emotion Recognition App</title>
 </head>
+
 <body>
-    <h1>대문 페이지</h1>
-    이름: {{user}}
-    레벨: {{data.level}}
-    포인트: {{data.point}}
-    경험치: {{data.exp}}
-
-    <form action="/" method="post">
-        다음이름: <input type="text" name='user'>
-        <input type="submit" value='전송하기'>
-    </form>
-
-
-    <form action="http://localhost:5000/uploadFace" method="POST"
-        enctype="multipart/form-data"
-    >
-        <input type="file" name="file">
-        <input type="submit"/>
-    </form>
-
-    <form action="http://localhost:5000/uploadVideo" method="POST"
-        enctype="multipart/form-data"
-    >
-        <input type="file" name='video'>
-        <input type="submit">
-    </form>
+
+    <div class="container" style="margin-top: 100px">
+        <h3>Face Emotion Recognition Platform</h3>
+        <hr>
+
+        {% if face_imgs == [] %}
+        <div class="alert alert-warning" role="alert">
+            No face images available
+        </div>
+        {% else %}
+        <table class="table">
+            <thead>
+                <tr>
+                    <th scope="col ">id</th>
+                    <th scope="col">Name</th>
+                    <th scope="col">Image</th>
+                    <th scope="col">Delete</th>
+                </tr>
+            </thead>
+            <tbody>
+
+                {% for face_img in face_imgs %}
+                <tr>
+                    <th scope="row">{{ face_img.id }}</th>
+                    <td>{{ face_img.name }}</td>
+                    <td><img src={{ url_for('static', filename=face_img.imgUrl) }} width="100" height="120"></td>
+                    <td><a href="/deleteFace/{{ face_img.name }}" class="btn btn-danger">Delete</a></td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+        {% endif %}
+
+
+        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+            <div class="form-group">
+                <label for="title" class="text-uppercase">Face Upload</label>
+                <input type="file" name="file">
+                <button type="submit" class="btn btn-outline-primary">Add</button>
+                <a href="/goTest" type="button" class="btn btn-outline-dark"> 비디오 분석하러 가기 </a>
+            </div>
+        </form>
+    </div>
+
+
+    <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
+        integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
+        crossorigin="anonymous"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
+        integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
+        crossorigin="anonymous"></script>
+    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
+        integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
+        crossorigin="anonymous"></script>
 </body>
+
 </html>
\ No newline at end of file
......
+<!doctype html>
+<html lang="en">
+
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport"
+        content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
+    <meta http-equiv="X-UA-Compatible" content="ie=edge">
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
+        integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
+    <title>Flask Face Emotion Recognition App</title>
+</head>
+
+<body>
+
+    <div class="container" style="margin-top: 100px">
+        <h3>Face Emotion Recognition Platform</h3>
+        <hr>
+
+        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+            <div class="form-group">
+                <label for="title" class="text-uppercase">Video Upload</label>
+                <input type="file" name="file">
+                <button type="submit" class="btn btn-outline-primary">Add</button>
+            </div>
+        </form>
+    </div>
+
+
+    <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
+        integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
+        crossorigin="anonymous"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
+        integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
+        crossorigin="anonymous"></script>
+    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
+        integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
+        crossorigin="anonymous"></script>
+</body>
+
+</html>
\ No newline at end of file