이재빈

예측모듈 수정

import cv2
import numpy as np
# Bug fix: the original imported the bare package ("import matplotlib as plt"),
# so any plt.* plotting call would fail; pyplot is the intended module.
import matplotlib.pyplot as plt
import time

# Directory that cropped tag candidates would be written to / read from.
fname = "./croppedimg/"
index = 0
# Timestamp of the previously processed frame, used for FPS calculation.
prevTime = 0
def filter_img(img):
    """Return True if *img* looks like the indoor tag we are searching for.

    The decision is made purely on color: the image is shrunk to 10x10
    (100 pixels) and the mean B, G, R values must each fall inside the
    empirically chosen range of the tag's color.

    img -- BGR image (numpy array), e.g. a cropped candidate region.
    """
    # Fixed 10x10 downsample so the average is always over 100 pixels,
    # matching the original /100 normalization.
    img = cv2.resize(img, (10, 10))
    # Vectorized per-channel mean replaces the original double loop that
    # summed img[x, y] pixel by pixel; the result is identical.
    mean_b, mean_g, mean_r = img.reshape(-1, 3).mean(axis=0)
    # NOTE(review): these bounds look hand-tuned for a specific tag/lighting
    # setup — confirm against the target environment.
    blue = 120 < mean_b < 200
    green = 120 < mean_g < 210
    red = 130 < mean_r < 230
    return blue and green and red
def bboxes(inp, prevTime):
    """Find candidate tag regions in a frame, annotate and display it.

    Thresholds the frame, extracts contours, filters their bounding boxes
    by aspect ratio / position / color (via filter_img), draws red boxes
    around accepted regions plus an FPS overlay, and shows the result in
    the 'captcha_result' window.

    inp      -- BGR frame (numpy array) from cv2.VideoCapture.
    prevTime -- time.time() value returned by the previous call.
    Returns the current timestamp, to be passed back in as prevTime.
    """
    img = inp
    curTime = time.time()
    img_final = inp
    # Grayscale mask pipeline kept from the original; image_final is
    # computed but not used further downstream.
    img2gray = cv2.cvtColor(inp, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(img2gray, img2gray, mask=mask)
    # Threshold the color frame (bright text/background separation at 180),
    # then convert to gray for the contour search.
    ret, new_img = cv2.threshold(img_final, 180, 255, cv2.THRESH_BINARY)
    newimg = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    # Portability fix: OpenCV 3 returns (image, contours, hierarchy) while
    # OpenCV 4 returns (contours, hierarchy). Taking index [-2] yields the
    # contour list under both APIs; the original 3-tuple unpacking crashes
    # on OpenCV 4.
    contours = cv2.findContours(newimg, cv2.RETR_CCOMP,
                                cv2.CHAIN_APPROX_NONE)[-2]
    for contour in contours:
        [x, y, w, h] = cv2.boundingRect(contour)
        # Reject boxes whose aspect ratio cannot be the tag text
        # (taller than wide, or more than twice as wide as tall).
        if h / w > 1.0 or w / h > 2.0:
            continue
        # Tags are only expected in the top part of the frame.
        if y > 150:
            continue
        cropped = img_final[y:y + h, x:x + w]
        # Color check decides whether this candidate is drawn.
        if filter_img(cropped):
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)
            cv2.putText(img, "cropped", (x - 50, y - 10),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
    img = cv2.resize(img, (720, 380))
    sec = curTime - prevTime
    prevTime = curTime
    # Guard against a zero interval (two frames with the same timestamp),
    # which made the original 1/sec raise ZeroDivisionError.
    fps = 1 / sec if sec > 0 else 0.0
    str1 = ("FPS : {0}".format(int(fps)))
    cv2.putText(img, str1, (0, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                0.8, (0, 255, 0), 1)
    cv2.imshow('captcha_result', img)
    return prevTime
83 -
cap = cv2.VideoCapture(0)  # open the default camera / video source
while cap.isOpened():
    ret, inp = cap.read()  # read one frame; ret is the success flag
    if ret:
        # bboxes annotates + displays the frame and returns the timestamp
        # needed for the next FPS computation.
        prevTime = bboxes(inp, prevTime)
    # waitKey both pumps the GUI event loop and provides the quit key;
    # without it the imshow window never refreshes.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print("Terminate Process..")
        break
cap.release()  # release the capture device
# Cleanup fix: close the display window(s) on exit as well.
cv2.destroyAllWindows()
......
...@@ -11,6 +11,7 @@ import numpy as np ...@@ -11,6 +11,7 @@ import numpy as np
11 from queue import Full, Empty 11 from queue import Full, Empty
12 from multiprocessing import Process, Queue 12 from multiprocessing import Process, Queue
13 13
14 +
14 class LabelingModule: 15 class LabelingModule:
15 def __init__(self): 16 def __init__(self):
16 # self.model1 = load_model('svhn_model.h5') 17 # self.model1 = load_model('svhn_model.h5')
...@@ -18,8 +19,7 @@ class LabelingModule: ...@@ -18,8 +19,7 @@ class LabelingModule:
18 self.image_queue = Queue(maxsize=3000) 19 self.image_queue = Queue(maxsize=3000)
19 self.label_queue = Queue(maxsize=10) 20 self.label_queue = Queue(maxsize=10)
20 self.signal_queue = Queue() 21 self.signal_queue = Queue()
21 - self.predict_process = Process(target=_predict, \ 22 + self.predict_process = Process(target=_predict, args=(self.model2, self.image_queue, self.label_queue, self.signal_queue))
22 - args=(self.model2, self.image_queue, self.label_queue, self.signal_queue))
23 23
24 def run(self): 24 def run(self):
25 self.predict_process.start() 25 self.predict_process.start()
...@@ -49,6 +49,7 @@ class LabelingModule: ...@@ -49,6 +49,7 @@ class LabelingModule:
49 img_tensor = img_tensor - img_tensor.mean() 49 img_tensor = img_tensor - img_tensor.mean()
50 return img_tensor 50 return img_tensor
51 51
52 +
52 def _predict(model, input_queue, output_queue, signal_queue): 53 def _predict(model, input_queue, output_queue, signal_queue):
53 print('predict process started.') 54 print('predict process started.')
54 while True: 55 while True:
...@@ -58,7 +59,7 @@ def _predict(model, input_queue, output_queue, signal_queue): ...@@ -58,7 +59,7 @@ def _predict(model, input_queue, output_queue, signal_queue):
58 break 59 break
59 except Empty: 60 except Empty:
60 pass 61 pass
61 - 62 +
62 tensor = input_queue.get(timeout=-1) 63 tensor = input_queue.get(timeout=-1)
63 dat = model.predict(np.array([tensor])) 64 dat = model.predict(np.array([tensor]))
64 o1 = np.argmax(dat[0]) 65 o1 = np.argmax(dat[0])
......