Graduate

Modify client.py
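This commit raises the face-probability threshold from 0.95 to 0.97, skips detections covering less than 15000 px², has detect_face() return the cropped images together with the MTCNN face tensors, prepares the save_image payload only when the server reports status 'failed', and moves the capture loop under a __main__ guard with a live preview window.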

client.py
@@ -1,7 +1,6 @@
 ##################################################
-#1. Detect faces from the webcam.                #
-#2. Send images that are at least 95% likely to be a face to the image server.  #
-#3. Send the preprocessed data to the verification server.  #
+#1. Detect faces from the webcam.
+#2. Send images whose face probability is at least 97% and whose area is at least 15000 to the server
 ##################################################
 import torch
 import numpy as np
@@ -27,11 +26,10 @@ mtcnn = MTCNN(keep_all=True, device=device)
 uri = 'ws://localhost:8765'
 
 async def send_face(face_list, image_list):
-    global uri
     async with websockets.connect(uri) as websocket:
         for face, image in zip(face_list, image_list):
             #type: np.float32
-            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
+            send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
             await websocket.send(send)
             recv = await websocket.recv()
             data = json.loads(recv)
@@ -39,53 +37,58 @@ async def send_face(face_list, image_list):
                 # success
                 print(data['student_id'], 'is attend')
             else:
-                print('verification failed')
+                print('verification failed:', data['status'])
+                if data['status'] == 'failed':
                 send = json.dumps({'action': 'save_image', 'image': image.tolist()})
-                await websocket.send(send)
 
 def detect_face(frame):
-    # If required, create a face detection pipeline using MTCNN:
-    global mtcnn
     results = mtcnn.detect(frame)
+    faces = mtcnn(frame, return_prob = False)
     image_list = []
+    face_list = []
     if results[1][0] == None:
-        return []
-    for box, prob in zip(results[0], results[1]):
-        if prob < 0.95:
+        return [], []
+    for box, face, prob in zip(results[0], faces, results[1]):
+        if prob < 0.97:
             continue
         print('face detected. prob:', prob)
         x1, y1, x2, y2 = box
-        image = frame[int(y1-10):int(y2+10), int(x1-10):int(x2+10)]
+        if (x2-x1) * (y2-y1) < 15000:
+            # ignore faces whose resolution is too low
+            continue
+        # save the region around the face with a 3-pixel margin
+        image = frame[int(y1-3):int(y2+3), int(x1-3):int(x2+3)]
         image_list.append(image)
-    return image_list
+        # save the MTCNN data
+        face_list.append(face.numpy())
+    return image_list, face_list
 
 def make_face_list(frame):
-    global mtcnn
     results, prob = mtcnn(frame, return_prob = True)
     face_list = []
     if prob[0] == None:
         return []
     for result, prob in zip(results, prob):
-        if prob < 0.95:
+        if prob < 0.97:
             continue
         #np.float32
         face_list.append(result.numpy())
     return face_list
 
-cap = cv2.VideoCapture(0)
-cap.set(3, 720)
-cap.set(4, 480)
-while True:
+if __name__ == '__main__':
+    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
+    cap.set(3, 720)
+    cap.set(4, 480)
+    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
+    while True:
         try:
-            #start = timeit.default_timer()
             ret, frame = cap.read()
+            cv2.imshow('img', frame)
+            cv2.waitKey(10)
             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            face_list = make_face_list(frame)
-            image_list = detect_face(frame)
-            ## send to the embedding server ##
-            if face_list:
+            image_list, face_list = detect_face(frame)
+            if not face_list:
+                continue;
             asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
-            #end = timeit.default_timer()
-            #print('delta time: ', end - start)
         except Exception as ex:
             print(ex)
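A note on the protocol: send_face() expects a verification server at ws://localhost:8765 that answers every 'verify' message with a JSON object carrying a 'status' field ('success' plus a 'student_id', or 'failed'), and the 15000 px² area gate corresponds to roughly a 122×122 face crop. That server is not part of this change, so the sketch below is only a hypothetical stub that speaks the same JSON protocol; the handler, the hardcoded matched flag, and the 'dummy' student_id are placeholders, not the project's actual matching logic.

# verify_server_stub.py - hypothetical stand-in for the verification server.
# Speaks the JSON-over-WebSocket protocol that send_face() in client.py expects.
import asyncio
import json
import numpy as np
import websockets

async def handler(websocket, path=None):  # 'path' default kept for older websockets versions
    async for message in websocket:
        data = json.loads(message)
        if data['action'] == 'verify':
            # normalized face tensor produced by MTCNN, sent as nested lists
            face = np.array(data['MTCNN'], dtype=np.float32)
            matched = False  # placeholder: embed the face and compare against enrolled students here
            if matched:
                await websocket.send(json.dumps({'status': 'success', 'student_id': 'dummy'}))
            else:
                await websocket.send(json.dumps({'status': 'failed'}))
        elif data['action'] == 'save_image':
            # unrecognized face image; persisting it is omitted in this sketch
            image = np.array(data['image'], dtype=np.uint8)

async def main():
    async with websockets.serve(handler, 'localhost', 8765):
        await asyncio.Future()  # run forever

if __name__ == '__main__':
    asyncio.run(main())

As committed, the new client prepares the save_image payload when status is 'failed' but the corresponding websocket.send was removed, so only the original version below actually transmits it.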
client.py (original version; the file the diff above modifies)
@@ -0,0 +1,91 @@
+##################################################
+#1. Detect faces from the webcam.                #
+#2. Send images that are at least 95% likely to be a face to the image server.  #
+#3. Send the preprocessed data to the verification server.  #
+##################################################
+import torch
+import numpy as np
+import cv2
+import asyncio
+import websockets
+import json
+import os
+import timeit
+import base64
+
+from PIL import Image
+from io import BytesIO
+import requests
+
+from models.mtcnn import MTCNN
+
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+print('Running on device: {}'.format(device))
+
+mtcnn = MTCNN(keep_all=True, device=device)
+
+uri = 'ws://localhost:8765'
+
+async def send_face(face_list, image_list):
+    global uri
+    async with websockets.connect(uri) as websocket:
+        for face, image in zip(face_list, image_list):
+            #type: np.float32
+            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
+            await websocket.send(send)
+            recv = await websocket.recv()
+            data = json.loads(recv)
+            if data['status'] == 'success':
+                # success
+                print(data['student_id'], 'is attend')
+            else:
+                print('verification failed')
+                send = json.dumps({'action': 'save_image', 'image': image.tolist()})
+                await websocket.send(send)
+
+def detect_face(frame):
+    # If required, create a face detection pipeline using MTCNN:
+    global mtcnn
+    results = mtcnn.detect(frame)
+    image_list = []
+    if results[1][0] == None:
+        return []
+    for box, prob in zip(results[0], results[1]):
+        if prob < 0.95:
+            continue
+        print('face detected. prob:', prob)
+        x1, y1, x2, y2 = box
+        image = frame[int(y1-10):int(y2+10), int(x1-10):int(x2+10)]
+        image_list.append(image)
+    return image_list
+
+def make_face_list(frame):
+    global mtcnn
+    results, prob = mtcnn(frame, return_prob = True)
+    face_list = []
+    if prob[0] == None:
+        return []
+    for result, prob in zip(results, prob):
+        if prob < 0.95:
+            continue
+        #np.float32
+        face_list.append(result.numpy())
+    return face_list
+
+cap = cv2.VideoCapture(0)
+cap.set(3, 720)
+cap.set(4, 480)
+while True:
+    try:
+        #start = timeit.default_timer()
+        ret, frame = cap.read()
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        face_list = make_face_list(frame)
+        image_list = detect_face(frame)
+        ## send to the embedding server ##
+        if face_list:
+            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
+        #end = timeit.default_timer()
+        #print('delta time: ', end - start)
+    except Exception as ex:
+        print(ex)
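client.py drives models.mtcnn through two call paths: mtcnn.detect(frame) for bounding boxes with detection probabilities, and calling the module directly for cropped, normalized face tensors. Assuming models.mtcnn is the MTCNN implementation from facenet-pytorch (whose API matches the usage above), the minimal sketch below exercises both paths standalone; 'sample.jpg' is a placeholder input path.

# mtcnn_check.py - standalone exercise of the two MTCNN call paths used by client.py.
# Assumes models.mtcnn follows the facenet-pytorch MTCNN API; 'sample.jpg' is a placeholder.
import cv2
import torch
from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
mtcnn = MTCNN(keep_all=True, device=device)

frame = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2RGB)

# Path 1 (detect_face): bounding boxes and detection probabilities
boxes, probs = mtcnn.detect(frame)
if boxes is not None:
    for box, prob in zip(boxes, probs):
        x1, y1, x2, y2 = box
        print('area:', (x2 - x1) * (y2 - y1), 'prob:', prob)

# Path 2 (make_face_list / send_face): cropped, normalized face tensors
faces = mtcnn(frame)  # (n_faces, 3, 160, 160) when keep_all=True, or None
if faces is not None:
    print('face crops:', tuple(faces.shape))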