Showing 12 changed files with 55 additions and 18 deletions
.idea/.gitignore
0 → 100644
.idea/KHY_Project1.iml
0 → 100644
1 | +<?xml version="1.0" encoding="UTF-8"?> | ||
2 | +<module type="PYTHON_MODULE" version="4"> | ||
3 | + <component name="NewModuleRootManager"> | ||
4 | + <content url="file://$MODULE_DIR$" /> | ||
5 | + <orderEntry type="inheritedJdk" /> | ||
6 | + <orderEntry type="sourceFolder" forTests="false" /> | ||
7 | + </component> | ||
8 | + <component name="PyDocumentationSettings"> | ||
9 | + <option name="format" value="GOOGLE" /> | ||
10 | + <option name="myDocStringFormat" value="Google" /> | ||
11 | + </component> | ||
12 | +</module> | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
.idea/misc.xml
0 → 100644
.idea/modules.xml
0 → 100644
1 | +<?xml version="1.0" encoding="UTF-8"?> | ||
2 | +<project version="4"> | ||
3 | + <component name="ProjectModuleManager"> | ||
4 | + <modules> | ||
5 | + <module fileurl="file://$PROJECT_DIR$/.idea/KHY_Project1.iml" filepath="$PROJECT_DIR$/.idea/KHY_Project1.iml" /> | ||
6 | + </modules> | ||
7 | + </component> | ||
8 | +</project> | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
.idea/vcs.xml
0 → 100644
... | @@ -115,7 +115,7 @@ class Client(tk.Frame): | ... | @@ -115,7 +115,7 @@ class Client(tk.Frame): |
115 | continue | 115 | continue |
116 | image = frame[int(y1):int(y2), int(x1):int(x2)] | 116 | image = frame[int(y1):int(y2), int(x1):int(x2)] |
117 | image_list.append(image) | 117 | image_list.append(image) |
118 | - # save MTCNN data | 118 | + # save tensor data |
119 | face_list.append(face.numpy()) | 119 | face_list.append(face.numpy()) |
120 | return face_list, image_list | 120 | return face_list, image_list |
121 | 121 | ||
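For context, a minimal sketch of the detect_face flow in this hunk, assuming the project uses facenet_pytorch's MTCNN with keep_all=True; the detector instance and the probability threshold below are illustrative, not taken from the diff:

from facenet_pytorch import MTCNN

mtcnn = MTCNN(keep_all=True)  # assumed detector; returns every face found in the frame

def detect_face(frame, prob_threshold=0.9):
    face_list, image_list = [], []
    boxes, probs = mtcnn.detect(frame)   # bounding boxes and detection confidences
    faces = mtcnn(frame)                 # cropped 3x160x160 face tensors
    if boxes is None:
        return face_list, image_list
    for box, prob, face in zip(boxes, probs, faces):
        if prob < prob_threshold:
            continue
        x1, y1, x2, y2 = box
        image_list.append(frame[int(y1):int(y2), int(x1):int(x2)])  # raw crop for display/storage
        face_list.append(face.numpy())   # save tensor data as numpy so it can be JSON-serialized
    return face_list, image_list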
... | @@ -128,13 +128,14 @@ class Client(tk.Frame): | ... | @@ -128,13 +128,14 @@ class Client(tk.Frame): |
128 | y2 = int(self.cam_height / 2 + self.detecting_square[1] / 2) | 128 | y2 = int(self.cam_height / 2 + self.detecting_square[1] / 2) |
129 | while getattr(t, "do_run", True): | 129 | while getattr(t, "do_run", True): |
130 | ret, frame = self.cap.read() | 130 | ret, frame = self.cap.read() |
131 | - # convert for the model | 131 | + # BGR to RGB |
132 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | 132 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
133 | face_list, image_list = self.detect_face(converted[y1:y2, x1:x2]) | 133 | face_list, image_list = self.detect_face(converted[y1:y2, x1:x2]) |
134 | # if a face is detected, request attendance | 134 | # if a face is detected, request attendance |
135 | + if face_list: | ||
135 | self.event_loop.run_until_complete(self.send_face(face_list, image_list)) | 136 | self.event_loop.run_until_complete(self.send_face(face_list, image_list)) |
136 | 137 | ||
137 | - # show image | 138 | + # draw the rectangle area |
138 | frame = cv2.rectangle(frame, (x1, y1), (x2, y2), self.rectangle_color, 3) | 139 | frame = cv2.rectangle(frame, (x1, y1), (x2, y2), self.rectangle_color, 3) |
139 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | 140 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
140 | # show a mirrored image | 141 | # show a mirrored image |
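The functional change in this hunk is the new if face_list: guard, so the websocket round trip in send_face is skipped for frames with no detected face. A hedged sketch of the loop body after the change (names follow the diff; the ret check is an added assumption):

while getattr(t, "do_run", True):
    ret, frame = self.cap.read()
    if not ret:
        continue                                   # assumed guard for a failed capture
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)                 # BGR to RGB
    face_list, image_list = self.detect_face(converted[y1:y2, x1:x2])  # scan only the rectangle area
    if face_list:                                  # request attendance only when a face was found
        self.event_loop.run_until_complete(self.send_face(face_list, image_list))
    frame = cv2.rectangle(frame, (x1, y1), (x2, y2), self.rectangle_color, 3)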
... | @@ -147,18 +148,15 @@ class Client(tk.Frame): | ... | @@ -147,18 +148,15 @@ class Client(tk.Frame): |
147 | @asyncio.coroutine | 148 | @asyncio.coroutine |
148 | def set_rectangle(self): | 149 | def set_rectangle(self): |
149 | self.rectangle_color = (255, 0, 0) | 150 | self.rectangle_color = (255, 0, 0) |
150 | - yield from asyncio.sleep(3) | 151 | + yield from asyncio.sleep(2) |
151 | self.rectangle_color = (0, 0, 255) | 152 | self.rectangle_color = (0, 0, 255) |
152 | 153 | ||
153 | - async def wait(self, n): | ||
154 | - await asyncio.sleep(n) | ||
155 | - | ||
156 | async def send_face(self, face_list, image_list): | 154 | async def send_face(self, face_list, image_list): |
157 | try: | 155 | try: |
158 | async with websockets.connect(uri) as websocket: | 156 | async with websockets.connect(uri) as websocket: |
159 | for face, image in zip(face_list, image_list): | 157 | for face, image in zip(face_list, image_list): |
160 | #type: np.float32 | 158 | #type: np.float32 |
161 | - send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()}) | 159 | + send = json.dumps({'action': 'verify', 'tensor': face.tolist()}) |
162 | await websocket.send(send) | 160 | await websocket.send(send) |
163 | recv = await websocket.recv() | 161 | recv = await websocket.recv() |
164 | data = json.loads(recv) | 162 | data = json.loads(recv) | ... | ... |
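After this hunk the verification message uses the renamed 'tensor' key, and the unused wait() helper is gone. A minimal sketch of one verify round trip, assuming the server answers with a JSON status object; the uri default and the reply shape are illustrative:

import asyncio, json
import websockets

async def send_one(face, uri="ws://localhost:8765"):
    # face: np.float32 array of shape (3, 160, 160); tolist() makes it JSON-serializable
    async with websockets.connect(uri) as websocket:
        await websocket.send(json.dumps({'action': 'verify', 'tensor': face.tolist()}))
        reply = json.loads(await websocket.recv())
        return reply  # e.g. an attendance/status field, depending on the server's response schema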
No preview for this file type
docs/~$최종보고서1.docx
0 → 100644
No preview for this file type
No preview for this file type
... | @@ -135,13 +135,12 @@ class Register(tk.Frame): | ... | @@ -135,13 +135,12 @@ class Register(tk.Frame): |
135 | continue | 135 | continue |
136 | image = frame | 136 | image = frame |
137 | image_list.append(image) | 137 | image_list.append(image) |
138 | - # save MTCNN data | 138 | + # save tensor data |
139 | face_list.append(face.numpy()) | 139 | face_list.append(face.numpy()) |
140 | return face_list, image_list | 140 | return face_list, image_list |
141 | 141 | ||
142 | def mainthread(self): | 142 | def mainthread(self): |
143 | t = threading.currentThread() | 143 | t = threading.currentThread() |
144 | - #asyncio.set_event_loop(self.event_loop) | ||
145 | x1 = int(self.cam_width / 2 - self.detecting_square[0] / 2) | 144 | x1 = int(self.cam_width / 2 - self.detecting_square[0] / 2) |
146 | x2 = int(self.cam_width / 2 + self.detecting_square[0] / 2) | 145 | x2 = int(self.cam_width / 2 + self.detecting_square[0] / 2) |
147 | y1 = int(self.cam_height / 2 - self.detecting_square[1] / 2) | 146 | y1 = int(self.cam_height / 2 - self.detecting_square[1] / 2) |
... | @@ -153,7 +152,7 @@ class Register(tk.Frame): | ... | @@ -153,7 +152,7 @@ class Register(tk.Frame): |
153 | # convert for the model | 152 | # convert for the model |
154 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | 153 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
155 | 154 | ||
156 | - # scan only the rectangle area (big speed difference) | 155 | + # scan only the rectangle area |
157 | face_list, image_list = self.detect_face(converted[y1:y2, x1:x2]) | 156 | face_list, image_list = self.detect_face(converted[y1:y2, x1:x2]) |
158 | 157 | ||
159 | # if a face is detected, draw a blue rectangle | 158 | # if a face is detected, draw a blue rectangle |
... | @@ -162,7 +161,7 @@ class Register(tk.Frame): | ... | @@ -162,7 +161,7 @@ class Register(tk.Frame): |
162 | else: | 161 | else: |
163 | frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3) | 162 | frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3) |
164 | 163 | ||
165 | - # show image | 164 | + # convert from BGR to RGB |
166 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | 165 | converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
167 | # show the user a mirrored image | 166 | # show the user a mirrored image |
168 | converted = cv2.flip(converted,1) | 167 | converted = cv2.flip(converted,1) |
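After the BGR-to-RGB conversion the preview is mirrored before being shown. A sketch of the display step, assuming the frame is handed to a tkinter label via PIL/ImageTk; the video_label attribute is illustrative:

from PIL import Image, ImageTk

converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)   # convert from BGR to RGB
converted = cv2.flip(converted, 1)                   # mirror image for the user
photo = ImageTk.PhotoImage(Image.fromarray(converted))
self.video_label.configure(image=photo)              # assumed tkinter Label that shows the preview
self.video_label.image = photo                       # keep a reference so it is not garbage collected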
... | @@ -198,7 +197,10 @@ class Register(tk.Frame): | ... | @@ -198,7 +197,10 @@ class Register(tk.Frame): |
198 | async with websockets.connect(self.uri) as websocket: | 197 | async with websockets.connect(self.uri) as websocket: |
199 | for face, image in zip(self.face_list, self.image_list): | 198 | for face, image in zip(self.face_list, self.image_list): |
200 | #type: np.float32 | 199 | #type: np.float32 |
201 | - send = json.dumps({'action': 'register', 'student_id':self.studentID.get(), 'student_name':self.studentName.get(), 'MTCNN': face.tolist()}) | 200 | + send = json.dumps({'action': 'register', |
201 | + 'student_id':self.studentID.get(), | ||
202 | + 'student_name':self.studentName.get(), | ||
203 | + 'tensor': face.tolist()}) | ||
202 | await websocket.send(send) | 204 | await websocket.send(send) |
203 | recv = await websocket.recv() | 205 | recv = await websocket.recv() |
204 | data = json.loads(recv) | 206 | data = json.loads(recv) | ... | ... |
... | @@ -35,8 +35,7 @@ clients = set() | ... | @@ -35,8 +35,7 @@ clients = set() |
35 | async def get_embeddings(face_list): | 35 | async def get_embeddings(face_list): |
36 | global model | 36 | global model |
37 | x = torch.Tensor(face_list).to(device) | 37 | x = torch.Tensor(face_list).to(device) |
38 | - yhat = model(x) | 38 | + return model(x) |
39 | - return yhat | ||
40 | 39 | ||
41 | async def get_distance(arr1, arr2): | 40 | async def get_distance(arr1, arr2): |
42 | distance = np.linalg.norm(arr1 - arr2) | 41 | distance = np.linalg.norm(arr1 - arr2) |
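get_embeddings now returns the model output directly. Since this is inference only, the forward pass can also be wrapped in torch.no_grad() to avoid building an autograd graph; a hedged variant (a common refinement, not something this diff does):

async def get_embeddings(face_list):
    global model
    x = torch.Tensor(face_list).to(device)   # (N, 3, 160, 160) float tensor
    with torch.no_grad():                    # inference only; skip gradient bookkeeping
        return model(x)                      # (N, embedding_dim) face embeddings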
... | @@ -78,7 +77,7 @@ async def thread(websocket, path): | ... | @@ -78,7 +77,7 @@ async def thread(websocket, path): |
78 | # load json | 77 | # load json |
79 | student_id = data['student_id'] | 78 | student_id = data['student_id'] |
80 | student_name = data['student_name'] | 79 | student_name = data['student_name'] |
81 | - face = np.asarray(data['MTCNN'], dtype = np.float32) | 80 | + face = np.asarray(data['tensor'], dtype = np.float32) |
82 | face = face.reshape((1,3,160,160)) | 81 | face = face.reshape((1,3,160,160)) |
83 | 82 | ||
84 | # connect to the DB | 83 | # connect to the DB |
... | @@ -113,7 +112,7 @@ async def thread(websocket, path): | ... | @@ -113,7 +112,7 @@ async def thread(websocket, path): |
113 | print(msg) | 112 | print(msg) |
114 | 113 | ||
115 | # load json | 114 | # load json |
116 | - face = np.asarray(data['MTCNN'], dtype = np.float32) | 115 | + face = np.asarray(data['tensor'], dtype = np.float32) |
117 | face = face.reshape((1,3,160,160)) | 116 | face = face.reshape((1,3,160,160)) |
118 | 117 | ||
119 | embedding = await get_embeddings(face) | 118 | embedding = await get_embeddings(face) |
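On verify, the server rebuilds the (1, 3, 160, 160) tensor from the 'tensor' field, embeds it, and compares it against registered embeddings with the L2 distance from get_distance. A sketch of the matching decision, assuming registered embeddings are held in a dict and matched against a distance threshold (both assumptions, not shown in the diff):

async def find_match(embedding, registered, threshold=0.9):
    # registered: {student_id: np.ndarray embedding}; the threshold value is illustrative
    best_id, best_dist = None, float('inf')
    for student_id, ref in registered.items():
        dist = np.linalg.norm(embedding - ref)   # same metric as get_distance above
        if dist < best_dist:
            best_id, best_dist = student_id, dist
    return (best_id, best_dist) if best_dist < threshold else (None, best_dist)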
... | @@ -171,7 +170,6 @@ async def thread(websocket, path): | ... | @@ -171,7 +170,6 @@ async def thread(websocket, path): |
171 | print(msg) | 170 | print(msg) |
172 | arr = np.asarray(data['image'], dtype = np.uint8) | 171 | arr = np.asarray(data['image'], dtype = np.uint8) |
173 | blob = arr.tobytes() | 172 | blob = arr.tobytes() |
174 | - # TODO: the code below only runs after a tuple has been inserted into the lecture DB | ||
175 | # the last table column is a datetime attribute; its default is set to use the server time. | 173 | # the last table column is a datetime attribute; its default is set to use the server time. |
176 | cursor = attendance_db.cursor(pymysql.cursors.DictCursor) | 174 | cursor = attendance_db.cursor(pymysql.cursors.DictCursor) |
177 | sql = "INSERT INTO undefined_image(lecture_id, image, width, height) VALUES (%s, _binary %s, %s, %s)" | 175 | sql = "INSERT INTO undefined_image(lecture_id, image, width, height) VALUES (%s, _binary %s, %s, %s)" | ... | ... |
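The unrecognized face image is stored as raw bytes, with width and height kept so the array can be rebuilt later; the datetime column is filled by its server-side default. A sketch of executing the INSERT shown above, assuming arr is an H x W (x C) uint8 image and lecture_id comes from the surrounding handler:

cursor = attendance_db.cursor(pymysql.cursors.DictCursor)
sql = "INSERT INTO undefined_image(lecture_id, image, width, height) VALUES (%s, _binary %s, %s, %s)"
cursor.execute(sql, (lecture_id, blob, arr.shape[1], arr.shape[0]))  # blob = arr.tobytes()
attendance_db.commit()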