Showing 1 changed file with 182 additions and 0 deletions.
saucecode/test.py (new file, mode 100644)
import cv2
import dlib
import numpy as np
from imutils import face_utils
from keras.models import load_model
import time
from eyecrop import crop_eye

import firebase_admin
from firebase_admin import credentials
from firebase_admin import db


# Authenticate against the Firebase Realtime Database and initialize the app.
# 'mykey.json' is the service-account credential file; the databaseURL points
# at the project's realtime database instance.
cred = credentials.Certificate('mykey.json')
firebase_admin.initialize_app(cred,{
    'databaseURL' : 'https://finalproject-1404a.firebaseio.com/'
})
# Root database reference — the main loop pushes alert messages through it.
ref=db.reference()
20 | + | ||
# Communication (alerts are sent through the Firebase reference above).


# Eye-crop size fed to the blink CNN: (width, height) = (34, 26).
IMG_SIZE = (34, 26)


# Face detecting source (OpenCV DNN): frozen TensorFlow SSD face detector.
model_path = 'sauce/opencv_face_detector_uint8.pb'
config_path = 'sauce/opencv_face_detector.pbtxt'
net = cv2.dnn.readNetFromTensorflow(model_path, config_path)
# Minimum detection confidence to count a face as present.
conf_threshold = 0.7
# Face detecting source (OpenCV DNN) — end.



# Blink-detection source: dlib landmarks + Keras eye-open/closed classifier.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('sauce/shape_predictor_68_face_landmarks.dat')
model = load_model('sauce/models.h5')
# Blink-detection source — end.

# Mutable state shared with the main loop:
face_count=0        # consecutive frames with no face detected
eye_count=0         # consecutive frames with both eyes closed
fps=0               # measured frames per second (0 until first frame)
face_control_sec=3  # seconds without a face before alerting
eye_control_sec=3   # seconds of closed eyes before alerting
face_found=False    # result of the DNN face pass on the previous frame
numb=1              # running count of "sleep" alerts sent
numb_1=1            # running count of "not detected" alerts sent

now = time.localtime()

# Video source; replace with a camera index for live capture.
cap = cv2.VideoCapture('sauce/my2.mp4')
frame_count, tt = 0, 0
56 | + | ||
# Main per-frame loop: blink detection (dlib + Keras) and face presence
# (OpenCV DNN); pushes alerts to Firebase when eyes stay closed or no face
# is seen for the configured number of seconds.
while cap.isOpened():
    # cap.read() returns (ret, frame); ret is False once the stream ends.
    ret, img = cap.read()
    if not ret:
        break

    frame_count += 1
    start_time = time.time()

    # --- blink detection ---
    faces = detector(img)

    eye_found = False
    for face in faces:
        shapes = predictor(img, face)
        # shape_to_np turns the 68 dlib landmarks into a (68, 2) array:
        # left eye = points 36..41, right eye = points 42..47.
        shapes = face_utils.shape_to_np(shapes)
        eye_img_l, eye_rect_l = crop_eye(img, eye_points=shapes[36:42])
        eye_img_r, eye_rect_r = crop_eye(img, eye_points=shapes[42:48])

        eye_img_l = cv2.resize(eye_img_l, dsize=IMG_SIZE)
        eye_img_r = cv2.resize(eye_img_r, dsize=IMG_SIZE)
        # Mirror the right eye so a single left-eye model serves both.
        eye_img_r = cv2.flip(eye_img_r, flipCode=1)
        eye_input_l = eye_img_l.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.
        eye_input_r = eye_img_r.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.

        # FIX: index the scalar out of the (1, 1) prediction array.
        # Formatting/comparing the raw array is deprecated in NumPy >= 1.25
        # and raises in NumPy 2.x.
        pred_l = model.predict(eye_input_l)[0][0]
        pred_r = model.predict(eye_input_r)[0][0]

        # Visualize openness score in [0, 1]: 'O' = open, '-' = closed.
        state_l = 'O %.2f' if pred_l > 0.1 else '- %.2f'
        state_r = 'O %.2f' if pred_r > 0.1 else '- %.2f'
        state_l = state_l % pred_l
        state_r = state_r % pred_r

        cv2.rectangle(img, (eye_rect_l[0], eye_rect_l[1]), (eye_rect_l[2], eye_rect_l[3]), (255, 255, 255), 2)
        cv2.rectangle(img, (eye_rect_r[0], eye_rect_r[1]), (eye_rect_r[2], eye_rect_r[3]), (255, 255, 255), 2)

        cv2.putText(img, state_l, tuple(eye_rect_l[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2)
        cv2.putText(img, state_r, tuple(eye_rect_r[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2)

        # Both eyes closed while a face was visible last frame -> drowsy.
        # NOTE(review): fps is 0 for the first frames, so the threshold
        # fires immediately then; behavior kept as in the original.
        if pred_r < 0.3 and pred_l < 0.3 and face_found:
            a = "Not 집중 or Sleep %d회" % (numb)
            eye_count = eye_count + 1
            if eye_count > fps * eye_control_sec:
                # Eyes closed for ~eye_control_sec seconds: alert Firebase.
                ref.update({'nob': a})
                numb = numb + 1
                eye_count = 0
        else:
            eye_count = 0

    print("eye_count = ", eye_count)

    # --- face presence (OpenCV DNN, 300x300 SSD input with mean subtraction) ---
    result_img = img.copy()
    h, w, _ = result_img.shape
    blob = cv2.dnn.blobFromImage(result_img, 1.0, (300, 300), [104, 117, 123], False, False)
    net.setInput(blob)
    detections = net.forward()
    face_found = False
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]

        if confidence > conf_threshold:
            face_found = True
            # Detections are normalized [0, 1] coords; scale to pixels.
            x1 = int(detections[0, 0, i, 3] * w)
            y1 = int(detections[0, 0, i, 4] * h)
            x2 = int(detections[0, 0, i, 5] * w)
            y2 = int(detections[0, 0, i, 6] * h)
            rect_1 = dlib.rectangle(x1, y1, x2, y2)

            # draw rects
            cv2.rectangle(result_img, (x1, y1), (x2, y2), (255, 255, 255), int(round(h / 150)), cv2.LINE_AA)
            cv2.putText(result_img, '%.2f%%' % (confidence * 100.), (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)

    # No face for ~face_control_sec seconds -> "not detected" alert.
    if not face_found:  # FIX: idiomatic truth test instead of 'is False'
        b = "Not Detected %d회" % (numb_1)
        face_count = face_count + 1

        if face_count >= fps * face_control_sec:
            ref.update({'nob': b})
            numb_1 = numb_1 + 1
            face_count = 0
    else:
        face_count = 0
    print("face_count = ", face_count)

    cv2.putText(result_img, 'FPS: %.2f' % (fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 100, 100), 2, cv2.LINE_AA)

    tt += time.time() - start_time
    # FIX: guard against tt == 0 when the first frame completes faster than
    # the timer resolution (ZeroDivisionError in the original).
    fps = frame_count / tt if tt > 0 else fps
    cv2.imshow('result', result_img)
    # FIX: mask to the low 8 bits so the 'x' key is recognized on platforms
    # where waitKey sets the high bits.
    if cv2.waitKey(1) & 0xFF == ord('x'):
        break
# Release the video source and close all OpenCV display windows.
cap.release()

cv2.destroyAllWindows()
-
Please register or login to post a comment