# evaluate.py
import cv2, imutils, time, os
import numpy as np
from mylib import config
from mylib.detection_test01 import detect_people
from scipy.spatial import distance as dist

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
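# LABELS now holds the 80 COCO class names (one per line in coco.names);
# only the "person" class is used below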

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if we are going to use GPU
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("")
    print("[INFO] Looking for GPU")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
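else:
    # optional fallback, not in the original script: OpenCV defaults to its CPU
    # backend anyway, but selecting it explicitly makes the choice visible
    print("[INFO] Using CPU")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)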

# determine only the *output* layer names that we need from YOLO
# (getUnconnectedOutLayers() returns a 2-D array on older OpenCV builds and a
# flat array on newer releases, so flatten() keeps the indexing valid on both)
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# loop over the 1m/2m/3m test images
for meter in range(1, 4):
    print("[INFO] Reading the {}m test image..".format(meter))
    frame = cv2.imread('./mylib/images/{}m.jpg'.format(meter), cv2.IMREAD_ANYCOLOR)
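    # cv2.imread returns None when the file is missing; this optional sanity
    # check (not in the original script) fails fast instead of hitting an
    # AttributeError on frame.shape below
    if frame is None:
        raise FileNotFoundError("./mylib/images/{}m.jpg could not be read".format(meter))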

    imageHeight, imageWidth = frame.shape[:2]
    resizeHeight = int(0.2 * imageHeight)
    resizeWidth = int(0.2 * imageWidth)

    # frame = imutils.resize(frame, width=700)
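    # downscale to 20% of the original size (assumption: the source photos are
    # high-resolution, so a smaller frame keeps detection and display manageable)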
    frame = cv2.resize(frame, (resizeWidth, resizeHeight), interpolation=cv2.INTER_LINEAR)

    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))
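    # each entry of `results` is expected to be a (confidence, bounding box,
    # centroid, feet) tuple -- see the unpacking in the drawing loop below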


    # initialize the set of indexes that violate the minimum social distance limit
    serious = set()

    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract the centroid and feet coordinates from the results so the
        # pairwise Euclidean distances can be computed from either reference point
        centroids = np.array([r[2] for r in results])
        feets = np.array([r[3] for r in results])

        # D = dist.cdist(centroids, centroids, metric="euclidean")    # centroid
        # print("[INFO] Using the centroid as the reference point")

        D = dist.cdist(feets, feets, metric="euclidean")    # feet
        print("[INFO] Using the feet as the reference point")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                print("MIN_DISTANCE : {}".format(config.MIN_DISTANCE))
                print("{}m's pixel distance: {}".format(meter, D[i, j]))
                # check to see if the distance between any two
                # reference points is less than the configured number of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of the offending pair
                    serious.add(i)
                    serious.add(j)

    # loop over the results
    for (i, (prob, bbox, centroid, feet)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        (fX, fY) = feet
        color = (0, 255, 0)  # green

        # if the index pair exists within the violation/abnormal sets, then update the color
        if i in serious:
            color = (0, 0, 255)  # red

        # draw (1) a bounding box around the person and (2) the
        # feet reference point of the person
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        # cv2.circle(frame, (cX, cY), 5, color, 2)    # centroid
        cv2.circle(frame, (fX, fY), 5, color, 2)  # feet (img, circle center, radius, color, line thickness)

    # draw the configured safe-distance threshold (in pixels) on the frame
    Safe_Distance = "Safe distance: >{} px".format(config.MAX_DISTANCE)
    cv2.putText(frame, Safe_Distance, (frame.shape[1]-200, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_COMPLEX, 0.60, (255, 0, 0), 2)
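    # note: this label reports config.MAX_DISTANCE, while the violation check
    # above uses config.MIN_DISTANCE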

    # draw the total number of social distancing violations on the output frame
    text = "Total serious violations: {}".format(len(serious))
    cv2.putText(frame, text, (10, frame.shape[0] - 55),
                cv2.FONT_HERSHEY_COMPLEX, 0.70, (0, 0, 255), 2)

    # show the output frame
    cv2.imshow("Real-Time Monitoring/Analysis Window", frame)

    # wait for a key press before moving on to the next test image
    key = cv2.waitKey(0)

# close any open windows
cv2.destroyAllWindows()