Graduate

Modify client.py

##################################################
# 1. Detect faces from the webcam.               #
# 2. Send images whose face probability is       #
#    >= 97% and whose bounding-box area is       #
#    >= 15000 px to the server.                  #
##################################################
import torch
import numpy as np
......@@ -27,11 +26,10 @@ mtcnn = MTCNN(keep_all=True, device=device)
uri = 'ws://localhost:8765'  # verification websocket server


async def send_face(face_list, image_list):
    """Send each detected face tensor to the verification server.

    face_list  -- preprocessed MTCNN face tensors (np.float32 arrays)
    image_list -- matching raw frame crops; a crop is uploaded only when
                  the server reports status 'failed' for its face
    """
    global uri
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # attendance recorded
                print(data['student_id'], 'is attend')
            else:
                print('verification failed:', data['status'])
                if data['status'] == 'failed':
                    # let the server keep the raw crop for later inspection
                    send = json.dumps({'action': 'save_image', 'image': image.tolist()})
                    await websocket.send(send)
def detect_face(frame, prob_threshold=0.97, min_area=15000, margin=3):
    """Detect faces in an RGB frame and return (image_list, face_list).

    frame          -- RGB image array (H, W, 3) from the webcam
    prob_threshold -- minimum detection probability for a face to be kept
    min_area       -- minimum bounding-box area in pixels; smaller faces
                      are skipped as too low-resolution to verify
    margin         -- pixels of context kept around each bounding box

    Returns two parallel lists: raw frame crops and the matching
    preprocessed MTCNN face tensors (as numpy arrays).
    """
    # If required, create a face detection pipeline using MTCNN:
    global mtcnn
    results = mtcnn.detect(frame)
    faces = mtcnn(frame, return_prob=False)
    image_list = []
    face_list = []
    # detect() yields (boxes, probs); probs[0] is None when nothing was found
    if results[1][0] is None:
        return [], []
    for box, face, prob in zip(results[0], faces, results[1]):
        if prob < prob_threshold:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        if (x2 - x1) * (y2 - y1) < min_area:
            # face resolution too low -- ignore
            continue
        # keep a small margin of context around the detected box
        image = frame[int(y1 - margin):int(y2 + margin), int(x1 - margin):int(x2 + margin)]
        image_list.append(image)
        # store the preprocessed MTCNN tensor for the verification server
        face_list.append(face.numpy())
    return image_list, face_list
def make_face_list(frame, prob_threshold=0.97):
    """Run MTCNN on an RGB frame and return preprocessed face tensors.

    frame          -- RGB image array from the webcam
    prob_threshold -- minimum detection probability for a face to be kept

    Returns a list of np.float32 arrays (one per accepted face).
    """
    global mtcnn
    faces, probs = mtcnn(frame, return_prob=True)
    face_list = []
    # probs[0] is None when MTCNN found no face at all
    if probs[0] is None:
        return []
    for face, prob in zip(faces, probs):
        if prob < prob_threshold:
            continue
        # np.float32
        face_list.append(face.numpy())
    return face_list
if __name__ == '__main__':
    # CAP_DSHOW avoids the slow MSMF backend startup on Windows
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cap.set(3, 720)   # frame width
    cap.set(4, 480)   # frame height
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    while True:
        try:
            # start = timeit.default_timer()
            ret, frame = cap.read()
            if not ret:
                # camera did not deliver a frame; try again
                continue
            cv2.imshow('img', frame)
            cv2.waitKey(10)
            # MTCNN expects RGB while OpenCV delivers BGR
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_list, face_list = detect_face(frame)
            if not face_list:
                continue
            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
            # end = timeit.default_timer()
            # print('delta time: ', end - start)
        except Exception as ex:
            # keep the capture loop alive; just report the error
            print(ex)
......
##################################################
# 1. Detect faces from the webcam.               #
# 2. Send images with face probability >= 95%    #
#    to the image server.                        #
# 3. Send preprocessed data to the verification  #
#    server.                                     #
##################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64
from PIL import Image
from io import BytesIO
import requests
from models.mtcnn import MTCNN
# Select the GPU when available; MTCNN runs noticeably faster on CUDA.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
# keep_all=True: detect every face in the frame, not only the largest one.
mtcnn = MTCNN(keep_all=True, device=device)
# Address of the verification websocket server.
uri = 'ws://localhost:8765'
async def send_face(face_list, image_list):
    """Verify each face tensor against the server; upload the raw crop
    of any face that fails verification.

    face_list  -- preprocessed MTCNN face tensors (np.float32 arrays)
    image_list -- matching raw frame crops
    """
    global uri
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # attendance recorded
                print(data['student_id'], 'is attend')
            else:
                print('verification failed')
                # keep the raw crop on the server for later inspection
                send = json.dumps({'action': 'save_image', 'image': image.tolist()})
                await websocket.send(send)
def detect_face(frame):
    """Detect faces in an RGB frame and return raw crops around each
    detection whose probability is at least 0.95.
    """
    # If required, create a face detection pipeline using MTCNN:
    global mtcnn
    boxes, probs = mtcnn.detect(frame)
    image_list = []
    # detect() yields (boxes, probs); probs[0] is None when nothing was found
    if probs[0] is None:
        return []
    for box, prob in zip(boxes, probs):
        if prob < 0.95:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        # keep a 10px margin of context around the detected box
        image = frame[int(y1 - 10):int(y2 + 10), int(x1 - 10):int(x2 + 10)]
        image_list.append(image)
    return image_list
def make_face_list(frame):
    """Return preprocessed MTCNN face tensors (np.float32 arrays) for
    every face detected with probability >= 0.95.
    """
    global mtcnn
    faces, probs = mtcnn(frame, return_prob=True)
    face_list = []
    # probs[0] is None when MTCNN found no face at all
    if probs[0] is None:
        return []
    for face, prob in zip(faces, probs):
        if prob < 0.95:
            continue
        # np.float32
        face_list.append(face.numpy())
    return face_list
cap = cv2.VideoCapture(0)
cap.set(3, 720)   # frame width
cap.set(4, 480)   # frame height
while True:
    try:
        # start = timeit.default_timer()
        ret, frame = cap.read()
        if not ret:
            # camera did not deliver a frame; try again
            continue
        # MTCNN expects RGB while OpenCV delivers BGR
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_list = make_face_list(frame)
        image_list = detect_face(frame)
        ## send to the embedding server ##
        if face_list:
            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
        # end = timeit.default_timer()
        # print('delta time: ', end - start)
    except Exception as ex:
        # keep the capture loop alive; just report the error
        print(ex)