이재빈

Human Prediction

import numpy as np
import cv2
import time
import socket
# Raspberry Pi control-channel address (TCP); detection messages are sent here.
HOST = '192.168.35.87'
PORT = 9999
# (address family) IPv4, TCP
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# raspberry pi addr
client_socket.connect((HOST, PORT))


# HOG descriptor with OpenCV's pre-trained default people-detection SVM.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
cv2.startWindowThread()
# Output directory for cropped person images (the actual write is commented out below).
fname = "./croppedimg/human/"
# open webcam video stream (MJPEG stream served by the Pi)
cap = cv2.VideoCapture("http://192.168.35.87:8091/?action=stream")
print("Capture From Raspberry pi dev/Camera0")
isFirst = True
i = 0
# One occupancy flag per 32-px column of the 320-px frame (see check_obstacle).
weight_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
prevTime = 0
def check_obstacle(weight_list, xA, xB):
    """Mark which horizontal cells of the frame a detection box covers.

    The 320-px-wide frame is divided into ``len(weight_list)`` cells of
    32 px each.  Every cell is first reset to 0, then each cell overlapped
    by the box ``[xA, xB]`` is set to -1 ("obstacle present").

    Args:
        weight_list: per-cell occupancy flags; mutated in place.
        xA: left pixel x-coordinate of the detection box.
        xB: right pixel x-coordinate of the detection box.

    Returns:
        The same list, with covered cells set to -1.
    """
    for i in range(len(weight_list)):
        weight_list[i] = 0
    # Convert pixel coordinates to cell indices.  Clamp to the valid range:
    # a box touching the right edge (xB == 320 -> index 10) previously ran
    # one past the end of the 10-element list and raised IndexError.
    last_cell = len(weight_list) - 1
    first = max(0, min(int(xA / 32), last_cell))
    last = max(0, min(int(xB / 32), last_cell))
    for i in range(first, last + 1):
        if weight_list[i] == 0:
            # All cells are 0 at this point, so this is simply "mark occupied".
            weight_list[i] = -1
    return weight_list
35 +
36 +
def draw_left_path(img, x, y, w, h):
    """Overlay a blue suggested path for a person detected on the LEFT.

    Draws thick guide lines from the bottom of the frame up to the box's
    lower edge.  ``img`` is modified in place and also returned.
    """
    bottom_y = y + int(h)
    inner_x = x + w
    # Guide line from the bottom edge up to the inner side of the box.
    cv2.line(img, (160 - 2 * w, 240), (inner_x, bottom_y), (255, 0, 0), 8)
    # Short arrow-like tip bending away from the person.
    cv2.line(img, (inner_x, bottom_y), (inner_x + int(w / 5), y + int(h - 20)),
             (255, 0, 0), 10)
    # Second guide line on the outer side.
    outer_x = x + 2 * w
    cv2.line(img, (250, 240), (outer_x, bottom_y), (255, 0, 0), 8)
    # cv2.line(img, (start_point, y+int(h)), (start_point+int(w/5),y+int(h-20)), (255, 0, 0), 10)
    return img
45 +
46 +
def draw_right_path(img, x, y, w, h):
    """Overlay a blue suggested path for a person detected on the RIGHT.

    Mirror image of ``draw_left_path``: guide lines run from the bottom of
    the frame to the box's lower-left side.  ``img`` is modified in place
    and also returned.
    """
    bottom_y = y + int(h)
    # Guide line from the bottom edge up to the left side of the box.
    cv2.line(img, (160 + 2 * w, 240), (x, bottom_y), (255, 0, 0), 8)
    # Short arrow-like tip bending away from the person.
    cv2.line(img, (x, bottom_y), (x - int(w / 5), y + int(h - 20)), (255, 0, 0), 8)
    # Second guide line on the outer side (abs() keeps the x-coord non-negative).
    cv2.line(img, (20, 240), (abs(x - w), bottom_y), (255, 0, 0), 8)
    # cv2.line(img, (start_point, y + int(h)), (start_point - int(w / 5), y + int(h-20)), (255, 0, 0), 8)
    return img
55 +
56 +
# ---- main capture / detection loop ----
# (HOG itself is initialized above; this section only tracks FPS statistics.)
avg = 0    # running mean of the displayed FPS
count = 1  # number of frames folded into the mean so far
while True:
    # Capture frame-by-frame
    start = time.time()
    curTime = time.time()
    ret, frame = cap.read()
    # resizing for faster detection [320 x 160]
    frame = cv2.resize(frame, (320, 160))

    # Detect people; returns bounding boxes (x, y, w, h) and confidence weights.
    boxes, weights = hog.detectMultiScale(frame, winStride=(8, 8))
    detectCount = 0
    # Convert (x, y, w, h) -> (x1, y1, x2, y2) corner form.
    boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes])
    for (xA, yA, xB, yB) in boxes:
        # Display the detected box in the colour picture.
        w = xB - xA
        h = yB - yA
        cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
        cv2.putText(frame, "Detect", (xA - 50, yA - 10),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
        detectCount = detectCount + 1
        if detectCount > 1:
            # More than one person in frame: report only, suggest no path.
            print("Waiting...")
            client_socket.sendall('Detecting...'.encode('utf-8'))
        else:
            # NOTE(review): i is reset but never incremented, so this crop
            # happens on every detection; the file write below is disabled.
            if i % 10 == 0:
                cropped = frame[yA:yB, xA:xB]
                i = 0
            # Decide which side the person occupies and draw an avoidance path.
            if xB < 190 and xA < 130:
                # Person on the left -> suggest passing on the right.
                try:
                    frame = draw_left_path(frame, xA, yA, xB - xA, yB - yA)
                    client_socket.sendall('Human Detected : Left'.encode('utf-8'))
                except Exception:
                    # Best effort: a drawing/socket hiccup must not kill the loop.
                    pass
            elif xA > 130 and xB > 190:
                # Person on the right -> suggest passing on the left.
                try:
                    frame = draw_right_path(frame, xA, yA, xB - xA, yB - yA)
                    client_socket.sendall('Human Detected : Right '.encode('utf-8'))
                except Exception:
                    pass
            else:
                # Centred detection: default to the right-hand path, no message.
                try:
                    frame = draw_right_path(frame, xA, yA, xB - xA, yB - yA)
                except Exception:
                    pass
            # s = fname + str(i)+'.jpg'
            # cv2.imwrite(s, cropped) # IMG File Write

    # ---- FPS overlay ----
    sec = curTime - prevTime
    prevTime = curTime
    # Guard: consecutive frames can arrive within timer resolution (sec == 0),
    # which previously raised ZeroDivisionError.
    fps = 1 / sec if sec > 0 else 0.0
    # NOTE(review): the +30 offset capped at 70 inflates the displayed FPS --
    # confirm this is an intentional display adjustment.
    if fps + 30 > 70:
        fps = 70
    else:
        fps = fps + 30
    str1 = ("FPS : {0}".format(int(fps)))
    # Incremental running mean.  The old formula (avg + fps) / count divided
    # the previous mean again, so the average was wrong from the 3rd frame on.
    avg = avg + (int(fps) - avg) / count
    cv2.putText(frame, str1, (0, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)
    frame = cv2.resize(frame, (680, 480), interpolation=cv2.INTER_CUBIC)
    cv2.imshow('frame', frame)

    count = count + 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Tear down the control connection to the Raspberry Pi.
client_socket.close()
# When everything done, release the capture
cap.release()
# and release the output
# finally, close the window
print("avg frame : ",avg)
cv2.destroyAllWindows()
# Extra waitKey gives HighGUI's event loop a chance to process the destroy.
cv2.waitKey(1)
...\ No newline at end of file ...\ No newline at end of file