# evaluate02_centroid.py
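# Calibrates a 1 m pixel scale by clicking two points in a reference image,
# then runs YOLO person detection on the test images 1m.jpg-3m.jpg and
# reports pixel and derived metric distances between the detected people.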
import os

import cv2
import imutils  # only used by the commented-out resize below
import numpy as np
from mylib import config
from mylib.detection_test01 import detect_people
from scipy.spatial import distance as dist

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if we are going to use GPU
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("")
    print("[INFO] Looking for GPU")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns nested index arrays on older OpenCV and a
# flat array on 4.5.4+; flatten() handles both layouts
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

### test
win_name = "scanning"
img = cv2.imread("./mylib/images/1m.jpg")

imgHeight, imgWidth = img.shape[:2]
resizeHeight = int(0.2 * imgHeight)
resizeWidth = int(0.2 * imgWidth)

img = cv2.resize(img, (resizeWidth, resizeHeight), interpolation=cv2.INTER_LINEAR)

# ### test
# # cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("./mylib/videos/video.mp4")
# _, img = cap.read()
# ### test end

# rows, cols = img.shape[:2]
draw = img.copy()
pts_cnt = 0
# the two calibration points (click two points that are 1 m apart in the scene)
pts = np.zeros((2, 2), dtype=np.float32)

dist_1m = 0  # pixel distance corresponding to 1 m, set by the mouse callback

def onMouse(event, x, y, flags, param):
    global pts_cnt, dist_1m
    if event == cv2.EVENT_LBUTTONDOWN and pts_cnt < 2:
        # mark the clicked point with a filled green circle
        cv2.circle(draw, (x, y), 10, (0, 255, 0), -1)
        cv2.imshow(win_name, draw)

        # store the clicked coordinates
        pts[pts_cnt] = (x, y)
        pts_cnt += 1
        if pts_cnt == 2:
            # pixel distance between the two calibration points (= 1 m)
            standard_dst = dist.cdist(pts, pts, metric="euclidean")
            dist_1m = standard_dst[0, 1]
            print("[INFO] Computing the distance between the two points")
            print("1m's pixel distance: {}".format(dist_1m))
            cv2.waitKey(0)
            cv2.destroyAllWindows()

cv2.imshow(win_name, img)
cv2.setMouseCallback(win_name, onMouse)
cv2.waitKey(0)
cv2.destroyAllWindows()

### test end
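
# A minimal sketch (not part of the original script) of how the calibration
# above can be reused: once dist_1m holds the pixel length of 1 m, any pixel
# distance measured in the same resized image plane converts to meters by a
# simple ratio. The helper name pixels_to_meters is hypothetical.
def pixels_to_meters(pixel_dist, pixels_per_meter=None):
    # default to the globally calibrated 1 m pixel scale from the clicks above
    scale = pixels_per_meter if pixels_per_meter is not None else dist_1m
    return pixel_dist / scale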


# evaluate on the test images 1m.jpg-3m.jpg (presumably named after the
# real-world distance between the people they show)
for meter in range(1, 4):
    print("[INFO] Reading the images...")
    frame = cv2.imread('./mylib/images/{}m.jpg'.format(meter), cv2.IMREAD_ANYCOLOR)

    imageHeight, imageWidth = frame.shape[:2]
    resizeHeight = int(0.2 * imageHeight)
    resizeWidth = int(0.2 * imageWidth)

    # frame = imutils.resize(frame, width=700)
    frame = cv2.resize(frame, (resizeWidth, resizeHeight), interpolation=cv2.INTER_LINEAR)

    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))
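    # detect_people (from mylib.detection_test01) is assumed, based on the
    # unpacking below, to return one tuple per detection:
    # (prob, bbox, centroid, feet), with bbox = (startX, startY, endX, endY),
    # centroid the box center, and feet presumably the bottom-center point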


    # initialize the set of indexes that violate the minimum social distance limit
    serious = set()

    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])	# test
        # feets = np.array([r[3] for r in results])

        D = dist.cdist(centroids, centroids, metric="euclidean")	# centroid
        print("[INFO] Using the centroid as the reference point")

        # D = dist.cdist(feets, feets, metric="euclidean")    # feet
        # print("[INFO] Using the feet as the reference point")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                print("MIN_DISTANCE : {}".format(config.MIN_DISTANCE))
                print("{}m's pixel distance: {}".format(meter, D[i, j]))
                print("{}m's meter distance: {}".format(meter, D[i, j] / dist_1m))
                # check to see if the distance between any two
                # centroid pairs is less than the configured number of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of the centroid pairs
                    serious.add(i)
                    serious.add(j)
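
    # A possible variation (a sketch, not the author's flow): since dist_1m is
    # the pixel length of 1 m, the violation test could instead be expressed
    # in meters, e.g. flagging pairs with D[i, j] / dist_1m < 2.0 under an
    # assumed 2 m distancing rule.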

    # loop over the results
    for (i, (prob, bbox, centroid, feet)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        (fX, fY) = feet
        color = (0, 255, 0)  # green

        # if the index exists within the violation set, then update the color
        if i in serious:
            color = (0, 0, 255)  # red

        # draw (1) a bounding box around the person and (2) the
        # centroid coordinates of the person,
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        cv2.circle(frame, (cX, cY), 5, color, 2)	# centroid
        # cv2.circle(frame, (fX, fY), 5, color, 2)  # feet (img, center, radius, color, thickness)

    ## test
    # draw some of the parameters
    safe_distance = "Safe distance: >{} px".format(config.MIN_DISTANCE)
    cv2.putText(frame, safe_distance, (frame.shape[1] - 250, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_COMPLEX, 0.60, (255, 0, 0), 2)

    # draw the total number of social distancing violations on the output frame
    text = "Total serious violations: {}".format(len(serious))
    cv2.putText(frame, text, (10, frame.shape[0] - 55),
                cv2.FONT_HERSHEY_COMPLEX, 0.70, (0, 0, 255), 2)

    # show the output frame
    cv2.imshow("Real-Time Monitoring/Analysis Window", frame)

    # wait for a keypress before moving on to the next image
    cv2.waitKey(0)

# close any open windows
cv2.destroyAllWindows()