김성연

Delete video test

1 -import dlib, cv2
2 -import numpy as np
3 -
4 -detector = dlib.get_frontal_face_detector()
5 -sp = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')
6 -facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
7 -
8 -descs = np.load('img/descs.npy', allow_pickle=True)[()]
9 -
10 -def encode_face(img):
11 - dets = detector(img, 1)
12 -
13 - if len(dets) == 0:
14 - return np.empty(0)
15 -
16 - for k, d in enumerate(dets):
17 - shape = sp(img, d)
18 - face_descriptor = facerec.compute_face_descriptor(img, shape)
19 -
20 - return np.array(face_descriptor)
21 -
22 -video_path = './data/record0.mp4'
23 -cap = cv2.VideoCapture(video_path)
24 -
25 -if not cap.isOpened():
26 - exit()
27 -
28 -_, img_bgr = cap.read() # (800, 1920, 3)
29 -padding_size = 0
30 -resized_width = 1920
31 -video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
32 -output_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1] + padding_size * 2))
33 -
34 -fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
35 -writer = cv2.VideoWriter('%s_output.mp4' % (video_path.split('.')[0]), fourcc, cap.get(cv2.CAP_PROP_FPS), output_size)
36 -
37 -while True:
38 - ret, img_bgr = cap.read()
39 - if not ret:
40 - break
41 -
42 - img_bgr = cv2.resize(img_bgr, video_size)
43 - img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
44 -
45 - # img_bgr = cv2.copyMakeBorder(img_bgr, top=padding_size, bottom=padding_size, left=0, right=0, borderType=cv2.BORDER_CONSTANT, value=(0,0,0))
46 -
47 - dets = detector(img_bgr, 1)
48 -
49 - for k, d in enumerate(dets):
50 - shape = sp(img_rgb, d)
51 - face_descriptor = facerec.compute_face_descriptor(img_rgb, shape)
52 -
53 - last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
54 -
55 - for name, saved_desc in descs.items():
56 - dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
57 -
58 - if dist < last_found['dist']:
59 - last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
60 -
61 - cv2.rectangle(img_bgr, pt1=(d.left(), d.top()), pt2=(d.right(), d.bottom()), color=last_found['color'], thickness=2)
62 - cv2.putText(img_bgr, last_found['name'], org=(d.left(), d.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
63 -
64 - writer.write(img_bgr)
65 -
66 - cv2.imshow('img', img_bgr)
67 -
68 - if cv2.waitKey(1) == ord('q'):
69 - break
70 -
71 -cap.release()
72 -writer.release()
1 -import dlib
2 -import cv2
3 -import numpy as np
4 -import matplotlib.pyplot as plt
5 -import tensorflow as tf
6 -import math
7 -import os
8 -import pathlib
9 -import time
10 -import pandas as pd
11 -import tensorflow as tf
12 -from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
13 -from tensorflow.keras.models import load_model
14 -from tensorflow.keras import regularizers
15 -from tensorflow import keras
16 -from imutils import face_utils
17 -import time
18 -
19 -
20 -start = time.time()
21 -detector = dlib.get_frontal_face_detector()
22 -predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
23 -facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
24 -model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
25 -# model = load_model('../checkpoint/er-best-mobilenet2-bt32-model-adam.h5')
26 -# model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5')
27 -
28 -
29 -descs = np.load('img/descs2.npy', allow_pickle=True)[()]
30 -
31 -video_path = './data/zoom_1.mp4'
32 -cap=cv2.VideoCapture(video_path)
33 -
34 -
35 -
36 -labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
37 -# labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
38 -time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
39 -def get_key(val):
40 - for key, value in labels_dict_.items():
41 - if(value == val):
42 - return key
43 -
44 -
45 -def convertMillis(millis):
46 - seconds=(millis/1000)%60
47 - minutes=(millis/(1000*60))%60
48 - hours=(millis/(1000*60*60))%24
49 - return seconds, int(minutes), int(hours)
50 -
51 -#cap = cv2.VideoCapture(0) # camera 0
52 -
53 -# read the video size (frame info) from the capture
54 -frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
55 -frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
56 -frame_size = (frameWidth, frameHeight)
57 -fps = cap.get((cv2.CAP_PROP_FPS))
58 -
59 -
60 -_, img_bgr = cap.read() # (800, 1920, 3)
61 -padding_size = 0
62 -resized_width = 1920
63 -video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
64 -timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
65 -prev_time = 0
66 -
67 -fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
68 -# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
69 -
70 -while True:
71 - retval, frameBGR = cap.read() # read the video one frame at a time
72 - current_time = time.time() - prev_time
73 -
74 - if(type(frameBGR) == type(None)):
75 - pass
76 - else:
77 - frameBGR = cv2.resize(frameBGR, video_size)
78 - frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
79 -
80 - if (retval is True) and (current_time > 1.5) :
81 - prev_time = time.time()
82 - faces = detector(frame, 1)
83 -
84 - for (i, face) in enumerate(faces):
85 - shape = predictor(frame, face)
86 - face_descriptor = facerec.compute_face_descriptor(frame, shape)
87 -
88 - img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
89 - imgarr = np.array(img).reshape(1, 224, 224, 3) /255
90 - emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
91 - # emotion = get_key(model.predict_classes(imgarr))
92 -
93 -
94 - last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
95 -
96 - for name, saved_desc in descs.items():
97 - dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
98 - if dist < last_found['dist']:
99 - last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
100 -
101 - cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
102 - cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
103 - # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
104 -
105 - con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
106 - time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
107 - print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
108 - # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
109 -
110 - cv2.imshow('frame', frameBGR)
111 -
112 - key = cv2.waitKey(25)
113 - if key == 27 :
114 - break
115 -
116 -print(time_dict)
117 -print("총 시간 : ", time.time() - start)
118 -if cap.isOpened():
119 - cap.release()
120 -
121 -for i in range(1,5):
122 - cv2.destroyAllWindows()
123 - cv2.waitKey(1)
1 -import dlib
2 -import cv2
3 -import numpy as np
4 -import matplotlib.pyplot as plt
5 -import tensorflow as tf
6 -import math
7 -import os
8 -import pathlib
9 -import time
10 -import pandas as pd
11 -import tensorflow as tf
12 -from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
13 -from tensorflow.keras.models import load_model
14 -from tensorflow.keras import regularizers
15 -from tensorflow import keras
16 -from imutils import face_utils
17 -import time
18 -
19 -
20 -start = time.time()
21 -detector = dlib.get_frontal_face_detector()
22 -predictor = dlib.shape_predictor("./models/shape_predictor_68_face_landmarks.dat")
23 -facerec = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
24 -# model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-adam.h5')
25 -model = load_model('../checkpoint/er-best-efficientNet1-bt32-model-SGD.h5')
26 -
27 -
28 -descs = np.load('img/descs2.npy', allow_pickle=True)[()]
29 -
30 -video_path = './data/zoom_1.mp4'
31 -cap=cv2.VideoCapture(video_path)
32 -
33 -
34 -
35 -# labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
36 -labels_dict_ = {'angry' : 0,'fear' : 1 ,'happy' : 2, 'neutral' : 3, 'sad' : 4, 'surprise' : 5}
37 -time_dict = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
38 -def get_key(val):
39 - for key, value in labels_dict_.items():
40 - if(value == val):
41 - return key
42 -
43 -
44 -def convertMillis(millis):
45 - seconds=(millis/1000)%60
46 - minutes=(millis/(1000*60))%60
47 - hours=(millis/(1000*60*60))%24
48 - return seconds, int(minutes), int(hours)
49 -
50 -#cap = cv2.VideoCapture(0) # camera 0
51 -
52 -# read the video size (frame info) from the capture
53 -frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
54 -frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
55 -frame_size = (frameWidth, frameHeight)
56 -fps = cap.get((cv2.CAP_PROP_FPS))
57 -
58 -
59 -_, img_bgr = cap.read() # (800, 1920, 3)
60 -padding_size = 0
61 -resized_width = 1920
62 -video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
63 -timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
64 -prev_time = 0
65 -
66 -fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
67 -# out1 = cv2.VideoWriter('./data/record0.mp4',fourcc, fps, frame_size)
68 -
69 -while True:
70 - retval, frameBGR = cap.read() # read the video one frame at a time
71 - current_time = time.time() - prev_time
72 -
73 - if(type(frameBGR) == type(None)):
74 - pass
75 - else:
76 - frameBGR = cv2.resize(frameBGR, video_size)
77 - frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
78 -
79 - if (retval is True) and (current_time > 1.5) :
80 - prev_time = time.time()
81 - faces = detector(frame, 1)
82 -
83 - for (i, face) in enumerate(faces):
84 - shape = predictor(frame, face)
85 - face_descriptor = facerec.compute_face_descriptor(frame, shape)
86 -
87 - img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
88 - imgarr = np.array(img).reshape(1, 224, 224, 3) /255
89 - # emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
90 - emotion = get_key(model.predict_classes(imgarr))
91 -
92 -
93 - last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
94 -
95 - for name, saved_desc in descs.items():
96 - dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
97 - if dist < last_found['dist']:
98 - last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
99 -
100 - cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
101 - cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
102 - # cv2.putText(frameBGR, last_found['name'] + ',' , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
103 -
104 - con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
105 - time_dict[emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
106 - print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
107 - # print("{0}:{1}:{2} {3}".format(con_hour, con_min, con_sec))
108 -
109 - cv2.imshow('frame', frameBGR)
110 -
111 - key = cv2.waitKey(25)
112 - if key == 27 :
113 - break
114 -
115 -print(time_dict)
116 -print("총 시간 : ", time.time() - start)
117 -if cap.isOpened():
118 - cap.release()
119 -
120 -for i in range(1,5):
121 - cv2.destroyAllWindows()
122 - cv2.waitKey(1)