import cv2
import dlib
import numpy as np
from imutils import face_utils
from keras.models import load_model
import time
from eyecrop import crop_eye

import firebase_admin
from firebase_admin import credentials
from firebase_admin import db


# Authenticate with Firebase using the service-account key and initialize the app
cred = credentials.Certificate('mykey.json')
firebase_admin.initialize_app(cred,{
    'databaseURL' : 'https://finalproject-1404a.firebaseio.com/'
})
ref = db.reference()
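# db.reference() with no path returns a reference to the database root;
# the loop below writes status strings under the 'nob' key via ref.update()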

# Communication with the Firebase realtime database (status strings are pushed via ref.update below)


IMG_SIZE = (34, 26)
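
# For reference only: a minimal sketch of what eyecrop.crop_eye presumably does
# (an assumption; eyecrop.py is not shown here). Given the six landmarks of one
# eye, it pads their bounding box to the model's 34:26 aspect ratio and returns
# the grayscale crop plus its [x1, y1, x2, y2] rect. This sketch is never called.
def _crop_eye_sketch(img, eye_points):
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # the model expects one channel
  x1, y1 = np.amin(eye_points, axis=0)
  x2, y2 = np.amax(eye_points, axis=0)
  cx, cy = (x1 + x2) / 2, (y1 + y2) / 2         # eye centre
  w = (x2 - x1) * 1.2                           # widen the landmark box a little
  h = w * IMG_SIZE[1] / IMG_SIZE[0]             # keep the 34:26 ratio
  min_x, min_y = int(cx - w / 2), int(cy - h / 2)
  max_x, max_y = int(cx + w / 2), int(cy + h / 2)
  eye_rect = np.array([min_x, min_y, max_x, max_y])
  return gray[min_y:max_y, min_x:max_x], eye_rect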


# Face detection source (OpenCV DNN)
model_path = 'sauce/opencv_face_detector_uint8.pb'
config_path = 'sauce/opencv_face_detector.pbtxt'
net = cv2.dnn.readNetFromTensorflow(model_path, config_path)
conf_threshold = 0.7
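# (note: detections scoring below conf_threshold are discarded; the .pb model is
# OpenCV's SSD ResNet-10 face detector, here in its quantized uint8 form)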
# End of face detection source (OpenCV DNN)



# Eye-blink detection source (dlib landmarks + Keras model)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('sauce/shape_predictor_68_face_landmarks.dat')
model = load_model('sauce/models.h5')
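# (assumption: judging by the reshape in the loop below, models.h5 expects a
# 34x26 single-channel eye crop scaled to [0, 1] and outputs one eye-open score)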
# End of eye-blink detection source

face_count = 0        # consecutive frames with no face detected
eye_count = 0         # consecutive frames with both eyes closed
fps = 0               # running average, updated at the end of each frame
face_control_sec = 3  # seconds without a face before alerting
eye_control_sec = 3   # seconds with closed eyes before alerting
face_found = False
numb = 1              # running number of "eyes closed" alerts
numb_1 = 1            # running number of "face not detected" alerts

now = time.localtime()  # currently unused

cap = cv2.VideoCapture('sauce/my2.mp4')
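# (note: VideoCapture reads the sample video above; pass 0 instead of a path to
# capture from the default webcam)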
frame_count, tt = 0, 0


while cap.isOpened():
  # cap.read() returns (ret, frame); ret is True if a frame was read, False otherwise
  ret, img = cap.read()
  if not ret:
    break

  frame_count += 1
  start_time = time.time()

  faces = detector(img)
  #print(faces)

  eye_found = False  # set here but never used
  for face in faces:
    shapes = predictor(img, face)
    shapes = face_utils.shape_to_np(shapes)
    # face_utils.shape_to_np converts dlib's 68 facial landmarks into a NumPy array;
    # points 36-41 outline the left eye and points 42-47 the right eye
    eye_img_l, eye_rect_l = crop_eye(img, eye_points=shapes[36:42])
    eye_img_r, eye_rect_r = crop_eye(img, eye_points=shapes[42:48])

    eye_img_l = cv2.resize(eye_img_l, dsize=IMG_SIZE)
    eye_img_r = cv2.resize(eye_img_r, dsize=IMG_SIZE)
    # mirror the right eye so it matches the left-eye orientation the model expects
    eye_img_r = cv2.flip(eye_img_r, flipCode=1)
    # reshape to (batch, height, width, channels) = (1, 26, 34, 1) and scale to [0, 1]
    eye_input_l = eye_img_l.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.
    eye_input_r = eye_img_r.copy().reshape((1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.

    # predict() returns a (1, 1) array; take the scalar eye-open score out of it
    pred_l = model.predict(eye_input_l)[0][0]
    pred_r = model.predict(eye_input_r)[0][0]
    # visualization: 'O' for open, '-' for closed, with the score in [0, 1]
    state_l = 'O %.2f' if pred_l > 0.1 else '- %.2f'
    state_r = 'O %.2f' if pred_r > 0.1 else '- %.2f'
    state_l = state_l % pred_l
    state_r = state_r % pred_r

    cv2.rectangle(img, (eye_rect_l[0],eye_rect_l[1]), (eye_rect_l[2],eye_rect_l[3]), (255,255,255), 2)
    cv2.rectangle(img, (eye_rect_r[0],eye_rect_r[1]), (eye_rect_r[2],eye_rect_r[3]), (255,255,255), 2)

    cv2.putText(img, state_l, tuple(eye_rect_l[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,255), 2)
    cv2.putText(img, state_r, tuple(eye_rect_r[0:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,255), 2)
    # alert when both eyes are closed while a face was visible
    # (face_found holds the previous frame's DNN result, since that detector runs later in the loop)
    if pred_r < 0.3 and pred_l < 0.3 and face_found:
        a = "Not concentrating or sleeping #%d" % numb
        eye_count = eye_count + 1
        # eyes closed for more than eye_control_sec seconds' worth of frames
        if eye_count > fps * eye_control_sec:
            ref.update({'nob': a})
            numb = numb + 1
            eye_count = 0
    else:
        eye_count = 0

    print("eye_count  = ",eye_count)

  # prepare input
  result_img = img.copy()
  h, w, _ = result_img.shape
  blob = cv2.dnn.blobFromImage(result_img, 1.0, (300, 300), [104, 117, 123], False, False)
  # blobFromImage preprocesses the frame: resize to 300x300 and subtract the BGR mean (104, 117, 123)
  net.setInput(blob)
  # inference, find faces
  detections = net.forward()
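  # detections has shape (1, 1, N, 7); each row is
  # [image_id, class_id, confidence, x1, y1, x2, y2] with coordinates normalized to [0, 1]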
  face_found = False
  # postprocessing
  for i in range(detections.shape[2]):
      confidence = detections[0, 0, i, 2]

      if confidence > conf_threshold:
          face_found = True
          x1 = int(detections[0, 0, i, 3] * w)
          y1 = int(detections[0, 0, i, 4] * h)
          x2 = int(detections[0, 0, i, 5] * w)
          y2 = int(detections[0, 0, i, 6] * h)
          rect_1 = dlib.rectangle(x1, y1, x2, y2)
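          # note: rect_1 is created but never used afterwards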

          # draw rects
          cv2.rectangle(result_img, (x1, y1), (x2, y2), (255, 255, 255), int(round(h / 150)), cv2.LINE_AA)
          cv2.putText(result_img, '%.2f%%' % (confidence * 100.), (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                      (255, 255, 255), 2, cv2.LINE_AA)

  # alert when the DNN found no face in this frame
  if not face_found:

    b = "Face not detected #%d" % numb_1
    face_count = face_count + 1

    # face missing for at least face_control_sec seconds' worth of frames
    if face_count >= fps * face_control_sec:
      ref.update({'nob': b})
      numb_1 = numb_1 + 1
      face_count = 0
  else:
    face_count = 0
  print("face_count = ", face_count)



  # visualize: overlay the running FPS
  cv2.putText(result_img, 'FPS: %.2f' % (fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 100, 100), 2, cv2.LINE_AA)

  tt += time.time() - start_time
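  # fps is a cumulative average since start-up; the seconds-to-frames thresholds above rely on it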
  fps = frame_count / tt
  cv2.imshow('result', result_img)
  if cv2.waitKey(1) == ord('x'):
    break



cap.release()

cv2.destroyAllWindows()