#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import division, print_function, absolute_import
import sys
import os
import datetime
import time
import warnings
import cv2
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from PIL import Image
from yolo import YOLO

from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from collections import deque
import tensorflow as tf
from tensorflow.compat.v1 import InteractiveSession
import pandas as pd
import json

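# Cluster the tracked pedestrian positions with k-means and render them over
# a frame captured from the video, saving the overlay to public/data/.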
def createKmeans(file_name):
    # Load the tracked (x, y) positions exported by main().
    with open(os.getcwd() + '/../deep_sort_yolov4/output/' + file_name + '_xy.json') as f:
        data = json.load(f)

    data = pd.DataFrame(data["data"])
    xy = data[['x', 'y']]

    # Cluster the positions into five spatial groups.
    km = KMeans(n_clusters=5)
    km.fit(xy)

    predict = pd.DataFrame(km.predict(xy), columns=['predict'])
    r = pd.concat([xy, predict], axis=1)

    # Scatter the points colored by cluster over the saved video frame.
    plt.scatter(r['x'], r['y'], c=r['predict'], alpha=0.3, s=200)
    image = plt.imread(os.getcwd() + '/../deep_sort_yolov4/output/' + file_name + '_img.png')
    plt.imshow(image)
    plt.savefig(os.getcwd() + '/public/data/' + file_name + '_kmeans.png')

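# Draw a rounded-corner border: straight segments of length d along each edge,
# joined by quarter-circle arcs of radius r at the four corners.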
def draw_border(img, pt1, pt2, color, thickness, r, d):
    x1,y1 = pt1
    x2,y2 = pt2
 
    # Top left
    cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
    cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
 
    # Top right
    cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
    cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
 
    # Bottom left
    cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
    cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
 
    # Bottom right
    cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
    cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)

# Let TensorFlow grow GPU memory on demand instead of pre-allocating the
# whole device (TF1-style session via the compat API).
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)



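# Recent center points per track id, for drawing motion trails
# (one fixed-length deque per possible id).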
pts = [deque(maxlen=30) for _ in range(9999)]
warnings.filterwarnings('ignore')

# initialize a list of colors to represent each possible class label
np.random.seed(100)
COLORS = np.random.randint(0, 255, size=(200, 3), dtype="uint8")

def main(yolo):
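    """Run YOLOv4 person detection and Deep SORT tracking on the video named
    by sys.argv[1], write an annotated MP4, and export per-frame position and
    pedestrian-count logs as JSON (then cluster them via createKmeans)."""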
    df1 = pd.DataFrame(columns=['x','y','id','time','s'])
    df2 = pd.DataFrame(columns=['total','now','time','s'])
    start = datetime.datetime.now()
    # Deep SORT parameters: appearance-matching threshold, unlimited gallery
    # budget, and an NMS overlap threshold of 1.0 (no suppression).
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    counter = []

    # Deep SORT appearance-feature encoder and tracker.
    model_filename = os.getcwd() + '/../deep_sort_yolov4/model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    video_capture = cv2.VideoCapture(os.getcwd() + '/../deep_sort_yolov4/' + sys.argv[1])

    file_name = os.path.splitext(sys.argv[1])[0]
    if writeVideo_flag:
        # Define the codec and create a VideoWriter object.
        w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        total_frame = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        fourcc = cv2.VideoWriter_fourcc(*'avc1')
        out = cv2.VideoWriter(os.getcwd() + '/public/data/' + file_name + '.mp4', fourcc, 15, (w, h))
        list_file = open(os.getcwd() + '/../deep_sort_yolov4/detection_rslt.txt', 'w')
        frame_index = -1

    fps = 0.0
    first_frame_saved = False
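
    # Per-frame loop: detect people with YOLO, encode appearance features,
    # suppress overlapping boxes, update the Deep SORT tracker, draw the
    # annotations, and periodically log positions (df1) and counts (df2).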
    while True:

        ret, frame = video_capture.read()
        if not ret:
            break
        # Frame tick derived from the capture timestamp (~76 ms per tick).
        cFrame = int(video_capture.get(cv2.CAP_PROP_POS_MSEC) / 76)
        t1 = time.time()
        if not first_frame_saved:
            # Save the first frame as the background image for createKmeans().
            cv2.imwrite(os.getcwd() + '/../deep_sort_yolov4/output/' + file_name + '_img.png', frame)
            first_frame_saved = True

        image = Image.fromarray(frame[..., ::-1])  # BGR to RGB
        boxs, confidence, class_names = yolo.detect_image(image)
        features = encoder(frame, boxs)
        # Detection confidences are fixed to 1.0 in this demo.
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # Approximate elapsed seconds (assumes the clip spans about 59 s),
        # used to derive a wall-clock timestamp from the start time.
        cs = 59.0 * cFrame / total_frame
        t = start + datetime.timedelta(milliseconds=cs * 1000)
        cTime = t.strftime("%Y-%m-%d %H:%M:%S.%f")

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        i = 0
        indexIDs = []

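        # For each confirmed, recently updated track: draw a rounded border,
        # the track id and class name, and record the box center.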
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]
            list_file.write(str(frame_index) + ',')
            list_file.write(str(track.track_id) + ',')
            center = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))
            # Rounded square centered on the box, sized to half the box width.
            r = (int(bbox[2]) - int(bbox[0])) / 2
            draw_border(frame, (center[0] - int(r), center[1] - int(r)),
                        (center[0] + int(r), center[1] + int(r)), color, 4, int(r / 5), int(r / 5))
            # Log the box as x, y, width, height.
            b0 = str(bbox[0])
            b1 = str(bbox[1])
            b2 = str(bbox[2] - bbox[0])
            b3 = str(bbox[3] - bbox[1])
            list_file.write(b0 + ',' + b1 + ',' + b2 + ',' + b3)
            list_file.write('\n')
            # Track id above the box center.
            cv2.putText(frame, str(track.track_id), (center[0], center[1] + 30), 0, 5e-3 * 150, color, 2)

            if len(class_names) > 0:
                class_name = class_names[0]
                cv2.putText(frame, str(class_name), (center[0] - 30, center[1] + 10), 0, 5e-3 * 150, color, 2)

            i += 1
            # Log this track's center every 10th frame tick.
            # (DataFrame.append was removed in pandas 2.0; use concat instead.)
            x, y = center
            if cFrame % 10 == 0:
                df1 = pd.concat([df1, pd.DataFrame([{'x': x, 'y': y, 'id': int(track.track_id),
                                                     'time': cTime, 's': cs}])], ignore_index=True)

            pts[track.track_id].append(center)

            # Optional: draw the center point and the recent motion path.
            # thickness = 5
            # cv2.circle(frame, center, 1, color, thickness)
            # for j in range(1, len(pts[track.track_id])):
            #     if pts[track.track_id][j - 1] is None or pts[track.track_id][j] is None:
            #         continue
            #     thickness = int(np.sqrt(64 / float(j + 1)) * 2)
            #     cv2.line(frame, pts[track.track_id][j - 1], pts[track.track_id][j], color, thickness)

        count = len(set(counter))  # unique track ids seen so far
        if cFrame % 10 == 0:
            df2 = pd.concat([df2, pd.DataFrame([{'total': count, 'now': i,
                                                 'time': cTime, 's': cs}])], ignore_index=True)
        cv2.putText(frame, "Total Pedestrian Counter: " + str(count), (50, 105), 0, 5e-3 * 150, (0, 255, 0), 2)
        cv2.putText(frame, "Current Pedestrian Counter: " + str(i), (50, 80), 0, 5e-3 * 150, (0, 255, 0), 2)
        cv2.putText(frame, "Time: " + str(cTime), (50, 55), 0, 5e-3 * 150, (0, 255, 0), 3)
        cv2.namedWindow("YOLO4_Deep_SORT", 0)
        cv2.resizeWindow('YOLO4_Deep_SORT', 1024, 768)
        cv2.imshow('YOLO4_Deep_SORT', frame)


        if writeVideo_flag:
            # Save the annotated frame.
            out.write(frame)
            frame_index = frame_index + 1

        # Exponentially smoothed frames-per-second estimate.
        fps = (fps + (1. / (time.time() - t1))) / 2

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    df1.to_json(os.getcwd() + '/../deep_sort_yolov4/output/' + file_name + '_xy.json', orient='table')
    df2.to_json(os.getcwd() + '/../deep_sort_yolov4/output/' + file_name + '_count.json', orient='table')
    createKmeans(file_name)
    print(" ")
    print("[Finish]")

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(YOLO())