# test03_02copy.py
# import cv2
# import numpy as np
# from matplotlib import pyplot as plt
#
# # Capturing video
# cap = cv2.VideoCapture(0)
# while True:
#     _, frame = cap.read()
#     # Draw four circles on the frame to mark the source points of the object being transformed.
#     cv2.circle(frame, (114, 151), 5, (0, 0, 255), -1)
#     cv2.circle(frame, (605, 89), 5, (0, 0, 255), -1)
#     cv2.circle(frame, (72, 420), 5, (0, 0, 255), -1)
#     cv2.circle(frame, (637, 420), 5, (0, 0, 255), -1)
#     # Collect the four source points above in one array.
#     imgPts = np.float32([[114, 151], [605, 89], [72, 420], [637, 420]])
#
#     # Four destination points for the output (the rectangle you want as the result).
#     objPoints = np.float32([[0, 0], [420, 0], [0, 637], [420, 637]])
#     # cv2.getPerspectiveTransform returns the 3x3 matrix that warpPerspective uses to produce the warped image.
#     matrix = cv2.getPerspectiveTransform(imgPts, objPoints)
#     result = cv2.warpPerspective(frame, matrix, (420, 637))  # output size matches the destination points
#     # Show both the original frame and the warped result.
#     cv2.imshow('frame', frame)
#     cv2.imshow('Perspective Transformation', result)
#     key = cv2.waitKey(1)
#     # plt.imshow(frame)  # optional matplotlib preview (disabled; cv2.imshow is used above)
#     # plt.show()
#     if key == 27:
#         break
#
# cap.release()
# cv2.destroyAllWindows()
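# A minimal, hedged sanity check for the block above: if it is re-enabled,
# cv2.perspectiveTransform pushes the source points through the same 3x3 matrix,
# so the four imgPts should land approximately on objPoints (add inside the loop):
# mapped = cv2.perspectiveTransform(imgPts.reshape(-1, 1, 2), matrix)
# print(mapped.reshape(-1, 2))  # expected: close to objPoints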

#####

import math

import cv2
import numpy as np


###
# import argparse, imutils, cv2, os, time, schedule
# #----------------------------Parse req. arguments------------------------------#
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", type=str, default="",
#     help="path to (optional) input video file")
# ap.add_argument("-o", "--output", type=str, default="",
#     help="path to (optional) output video file")
# ap.add_argument("-d", "--display", type=int, default=1,
#     help="whether or not output frame should be displayed")
# args = vars(ap.parse_args())
# #------------------------------------------------------------------------------#
#
# # if a video path was not supplied, grab a reference to the live camera stream
# if not args.get("input", False):
#     print("[INFO] Starting the live stream..")
#     cap = cv2.VideoCapture(config.url)  # assumes a config module providing the stream URL is imported elsewhere
#     time.sleep(2.0)
#
# # otherwise, grab a reference to the video file
# else:
#     print("[INFO] Starting the video..")
#     cap = cv2.VideoCapture(args["input"])
###
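# Example invocation if the argparse block above is re-enabled (the video path
# is illustrative): python test03_02copy.py --input ./mylib/images/video.mp4 --display 1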

win_name = "scanning"
img = cv2.imread("./mylib/images/roi.png")
if img is None:
    raise FileNotFoundError("could not read ./mylib/images/roi.png")
imgHeight, imgWidth = img.shape[:2]
# Scale factor of 1 keeps the original size; change it to shrink or enlarge the image.
resizeHeight = int(1 * imgHeight)
resizeWidth = int(1 * imgWidth)
img = cv2.resize(img, (resizeWidth, resizeHeight), interpolation=cv2.INTER_LINEAR)

### test
# cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("./mylib/images/video.mp4")
# _, img = cap.read()
### test end

rows, cols = img.shape[:2]
draw = img.copy()
pts_cnt = 0
pts = np.zeros((4, 2), dtype=np.float32)
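
# A hedged sketch (not wired into onMouse below) of the sum/diff corner-ordering
# trick hinted at by the commented-out argmin/argmax lines inside the callback:
# smallest x+y -> top-left, largest x+y -> bottom-right,
# smallest y-x -> top-right, largest y-x -> bottom-left.
def order_points(p):
    s = p.sum(axis=1)               # x + y for each point
    d = np.diff(p, axis=1).ravel()  # y - x for each point
    return np.float32([p[np.argmin(s)],    # top-left
                       p[np.argmin(d)],    # top-right
                       p[np.argmax(s)],    # bottom-right
                       p[np.argmax(d)]])   # bottom-left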

def onMouse(event, x, y, flags, param):
    global pts_cnt
    if event == cv2.EVENT_LBUTTONDOWN and pts_cnt < 4:  # ignore clicks after the fourth point
        # mark the clicked point with a green circle
        cv2.circle(draw, (x, y), 10, (0, 255, 0), -1)
        cv2.imshow(win_name, draw)

        # store the mouse coordinates
        pts[pts_cnt] = [x, y]
        pts_cnt += 1
        if pts_cnt == 4:
            # work out which clicked point is which corner
            sm = pts.sum(axis=1)  # x + y for each point (only needed for the automatic ordering below)
            diff = np.diff(pts, axis=1)  # y - x for each point (only needed for the automatic ordering below)

            # Automatic corner ordering (left disabled):
            # topLeft = pts[np.argmin(sm)]       # smallest x + y -> top-left
            # bottomRight = pts[np.argmax(sm)]   # largest x + y -> bottom-right
            # topRight = pts[np.argmin(diff)]    # smallest y - x -> top-right
            # bottomLeft = pts[np.argmax(diff)]  # largest y - x -> bottom-left

            # Corners taken in click order: top-left, bottom-left, bottom-right, top-right.
            topLeft = pts[0]
            bottomLeft = pts[1]
            bottomRight = pts[2]
            topRight = pts[3]

            # four source points, in order, before the transform
            pts1 = np.float32([topLeft, topRight, bottomRight, bottomLeft])

            # compute the width and height of the document for the output image
            # w1 = abs(bottomRight[0] - bottomLeft[0])
            # w2 = abs(topRight[0] - topLeft[0])
            # h1 = abs(topRight[1] - bottomRight[1])
            # h2 = abs(topLeft[1] - bottomLeft[1])

            w1 = int(math.hypot(bottomRight[0] - bottomLeft[0], bottomRight[1] - bottomLeft[1]))  # bottom edge length
            w2 = int(math.hypot(topRight[0] - topLeft[0], topRight[1] - topLeft[1]))              # top edge length
            h1 = int(math.hypot(topRight[0] - bottomRight[0], topRight[1] - bottomRight[1]))      # right edge length
            h2 = int(math.hypot(topLeft[0] - bottomLeft[0], topLeft[1] - bottomLeft[1]))          # left edge length

            width = max([w1, w2])   # the longer horizontal edge becomes the document width
            height = max([h1, h2])  # the longer vertical edge becomes the document height

            # four destination points after the transform
            pts2 = np.float32([[0, 0], [width - 1, 0],
                               [width - 1, height - 1], [0, height - 1]])

            # compute the perspective transform matrix
            mtrx = cv2.getPerspectiveTransform(pts1, pts2)
            # apply the perspective transform to the original (unmarked) image
            result = cv2.warpPerspective(img, mtrx, (width, height))
            cv2.imshow('scanned', result)

cv2.imshow(win_name, img)
cv2.setMouseCallback(win_name, onMouse)
cv2.waitKey(0)
cv2.destroyAllWindows()