# project.py
import math
import cv2
import numpy as np
from copy import deepcopy
from model.yolo_model import YOLO

class Point2D:
    """Simple 2D point holding a width (x) and height (y) coordinate pair."""

    def __init__(self, width, height):
        # Callers read .width / .height directly; no other behavior.
        self.width, self.height = width, height

def process_image(img):
    """Prepare a BGR frame for YOLO inference.

    Resizes to the fixed 416x416 network input with bicubic interpolation,
    converts to float32 scaled into [0, 1], and prepends a batch axis.
    Returns an array of shape (1, 416, 416, channels).
    """
    resized = cv2.resize(img, (416, 416),
                         interpolation=cv2.INTER_CUBIC)
    scaled = np.array(resized, dtype='float32') / 255.
    return scaled[np.newaxis, ...]


def get_classes(file):
    """Load class names from a text file, one name per line.

    Leading/trailing whitespace (including the newline) is stripped
    from every line. Returns the names as a list of strings.
    """
    with open(file) as handle:
        return [line.strip() for line in handle]


if __name__ == '__main__':
    # Open the input video file
    camera = cv2.VideoCapture("input/example.mp4")

    # Set up the detector; presumably (object confidence, NMS threshold) —
    # TODO confirm against model.yolo_model.YOLO's constructor
    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    # Dump every frame of the video to mid/frameN.png, counting frames as we go
    success,image = camera.read()
    count = 0
    while success:
        cv2.imwrite("mid/frame%d.png" % count, image)     # save frame as a PNG file
        success, image = camera.read()
        count += 1

    # Run detection on each saved frame and record, per detected object:
    # frame index, class name, box area (size), distance of the box center
    # from the image center (coordinatevalue), and the clipped integer box corners.
    detectionInfo = []
    for i in range(count):
        #filename = "mid/frame"+str(i)+"."
        image = cv2.imread("mid/frame%d.png" % i)
        pimage = process_image(image)

        boxes, classes, scores = yolo.predict(pimage, image.shape)
        for box, score, cl in zip(boxes, scores, classes):
            x, y, w, h = box
            name = all_classes[cl]
            size = int(w*h)

            if size <= 4000: # skip boxes too small to make a good thumbnail
                continue
            if x <= 0 or x+w >= image.shape[1] or y <= 0 or y+h >= image.shape[0]: # skip objects that extend past the frame edge
                continue

            # Distance from the box center to the image center (smaller = more centered)
            # NOTE(review): `object` shadows the builtin of the same name
            object = Point2D(width= x + w/2, height= y + h/2)
            a = image.shape[1]/2 - object.width
            b = image.shape[0]/2 - object.height
            coordinatevalue = int(math.sqrt((a*a)+(b*b)))

            # Rounded box corners clipped to the image bounds.
            # NOTE(review): `top`/`right` are derived from x and `left`/`bottom`
            # from y, so the names are swapped relative to the usual convention;
            # the overlap code below uses them consistently, but confirm before reuse.
            top = max(0, np.floor(x + 0.5).astype(int))
            left = max(0, np.floor(y + 0.5).astype(int))
            right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
            bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))

            # Persist the detection record
            detectionInfo.append([i, name, size, coordinatevalue, top, left, right, bottom])

    # Write every detection record to a plain-text report, one CSV-ish line each
    f = open("detectionInfo.txt", 'w')
    for i in range(len(detectionInfo)):
        data = str(detectionInfo[i][0]) +", " + detectionInfo[i][1] + ", " + str(detectionInfo[i][2]) + ", " + str(detectionInfo[i][3]) + ", " + str(detectionInfo[i][4]) + ", " + str(detectionInfo[i][5]) + ", " + str(detectionInfo[i][6]) + ", " + str(detectionInfo[i][7]) + "\n"
        f.write(data)
    f.close()

    # Collect the set of detected class names (no duplicates); each maps to a
    # list that will hold the chosen frame index for that class
    namelist = {}
    for i in range(len(detectionInfo)):
        if not detectionInfo[i][1] in namelist:
            namelist[detectionInfo[i][1]] = []

    # Size criterion: for each class, pick the frame where its box area is largest
    for objectName in namelist.keys():
        maxindex = 0
        maxvalue = 0
        for j in range(len(detectionInfo)):
            if detectionInfo[j][1] == objectName:
                if detectionInfo[j][2] > maxvalue:
                    maxvalue = detectionInfo[j][2]
                    maxindex = detectionInfo[j][0]
        namelist[objectName].append(maxindex)

    # Save a blurred copy of each chosen frame as output1/<class>.png
    for objectname, framelist in namelist.items():
        image = cv2.imread("mid/frame%d.png" % framelist[0])
        output1 = cv2.GaussianBlur(image, (5,5), 0)
        cv2.imwrite("output1/%s.png"% (objectname), output1)

    # Centeredness criterion: reset the per-class lists, then pick the frame
    # where the object is closest to the image center (smallest coordinatevalue)
    for objectName in namelist.keys():
        namelist[objectName] = []

    for objectName in namelist.keys():
        minindex = 0
        minvalue = 999999
        for j in range(len(detectionInfo)):
            if detectionInfo[j][1] == objectName:
                if detectionInfo[j][3] < minvalue:
                    minvalue = detectionInfo[j][3]
                    minindex = detectionInfo[j][0]
        namelist[objectName].append(minindex)

    # Save a blurred copy of each chosen frame as output2/<class>.png
    for objectname, framelist in namelist.items():
        image = cv2.imread("mid/frame%d.png" % framelist[0])
        output2 = cv2.GaussianBlur(image, (5,5), 0)
        cv2.imwrite("output2/%s.png"% (objectname), output2)


    # Plan 2: for frames containing multiple objects, compute the combined area
    # excluding the overlapping region and keep the largest candidates.
    # `best` is the flat list of frame indices, one per detection record.
    best = list(list(zip(*detectionInfo))[0])
    bestList= {}
    for i in range(len(detectionInfo)):
        if best.count(detectionInfo[i][0]) == 2:
            # Exactly two detections in this frame: store their full records
            if detectionInfo[i][0] in bestList:
                bestList[detectionInfo[i][0]].append(detectionInfo[i][1:])
            else:
                bestList[detectionInfo[i][0]] = [detectionInfo[i][1:]]
        elif best.count(best[i]) > 2:
            # More than two: store [record index, centeredness] pairs for pruning below
            if best[i] in bestList:
                bestList[best[i]].append([i, detectionInfo[i][3]])
            else:
                bestList[best[i]] = [[i, detectionInfo[i][3]]]

    # For frames with more than two detections, keep only the two most
    # centered records (entries of length 2 are the [index, coordi] pairs)
    for key, value in bestList.items():
        if len(value[0]) == 2:
            tmpValue = deepcopy(value)
            first, second = 0, 0
            indexList = list(list(zip(*tmpValue))[0])
            coordiList = list(list(zip(*tmpValue))[1])
            minCordi = coordiList.index(min(coordiList))
            first = indexList[minCordi]
            coordiList[minCordi] = 99999
            minCordi = coordiList.index(min(coordiList))
            second = indexList[minCordi]
            bestList[key] = [detectionInfo[first][1:], detectionInfo[second][1:]]

    # Compare the top/left/right/bottom of the two selected objects per frame;
    # when their boxes intersect, replace the entry with
    # [joined names, combined area minus the overlap, centeredness of the pair's
    # midpoint]; frames without an intersection are dropped.
    # Each value holds exactly two records at this point.
    deleteList = []
    for key, value in bestList.items():
        a_top, a_left, a_right, a_bottom = value[0][3:]
        b_top, b_left, b_right, b_bottom = value[1][3:]
        o_top, o_left, o_right, o_bottom = 0, 0, 0, 0
        isOverlapped = False
        # NOTE(review): this ORs the two axis tests, so overlap on a single axis
        # counts; a true 2D intersection requires both axes to overlap — confirm intent.
        if a_top < b_top < a_bottom or b_top < a_top < b_bottom or a_left < b_left < a_right or b_left < a_left < b_right:
            isOverlapped = True
            o_top = max(a_top, b_top)
            o_bottom = min(a_bottom, b_bottom)
            o_left = max(a_left, b_left)
            o_right = min(a_right, b_right)

        o_size = (o_bottom-o_top)*(o_right-o_left)
        if isOverlapped == True:
            a_object = Point2D(width= (a_top + a_bottom)//2, height=(a_left + a_right)//2)
            b_object = Point2D(width= (b_top + b_bottom)//2, height=(b_left + b_right)//2)
            o_object = Point2D(width=(a_object.width+b_object.width)//2 , height = (a_object.height + b_object.height)//2)
            # Distance of the pair's midpoint from the image center
            # (NOTE(review): uses `image` left over from the previous loop)
            toTheOrigin_w = image.shape[1] / 2 - o_object.width
            toTheOrigin_h = image.shape[0] / 2 - o_object.height
            coordiValue_object = int(math.sqrt((toTheOrigin_w ** 2) + (toTheOrigin_h ** 2)))
            bestList[key] = [value[0][0] + ", "+ value[1][0], value[0][1]+value[1][1]-o_size, coordiValue_object]
        else:
            deleteList.append(key)
    for i in range(len(deleteList)):
        del bestList[deleteList[i]]

    namelist.clear()

    # Among the overlapping pairs, keep per name-pair the most centered frame
    # whose combined area exceeds limitSize
    limitSize = 100000
    for key, value in bestList.items():
        if value[1] > limitSize:
            if value[0] in namelist:
                if value[2] < namelist[value[0]][2]:
                    namelist[value[0]] = [key] + value[1:]
            else:
                namelist[value[0]] = [key] + value[1:]

    # Save a blurred copy of each chosen frame to output3
    for objectname, framelist in namelist.items():
        image = cv2.imread("mid/frame%d.png" % framelist[0])
        output3 = cv2.GaussianBlur(image, (5, 5), 0)
        cv2.imwrite("output3/%s.png" % (objectname), output3)



    # Plan 3: pick frames where the object sits at a specific position

    # Using the output1~3 results, crop the images so the object lands at a
    # specific position; save the results to output1_1, output2_1, output3_1








#kernel_sharpen_1 = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])
#output2 = cv2.filter2D(output1,-1,kernel_sharpen_1)