Showing 2 changed files with 182 additions and 0 deletions
Integrated Module/labeling_module.py
0 → 100644
# plaidml (optional Keras backend; uncomment to use)
# import plaidml.keras
# plaidml.keras.install_backend()

# packages
from keras.models import load_model
from keras.preprocessing import image

import numpy as np
from queue import Full, Empty
from multiprocessing import Process, Queue


class LabelingModule:
    def __init__(self):
        # self.model1 = load_model('svhn_model.h5')
        self.model2 = load_model('svhn_model.h5')
        self.image_queue = Queue(maxsize=3000)
        self.label_queue = Queue(maxsize=10)
        self.signal_queue = Queue()
        self.predict_process = Process(
            target=_predict,
            args=(self.model2, self.image_queue,
                  self.label_queue, self.signal_queue))

    def run(self):
        self.predict_process.start()

    def close(self):
        self.image_queue.close()
        self.label_queue.close()

    def new_tensor(self, tensor):
        # put_nowait: a plain blocking put() would never raise Full
        try:
            self.image_queue.put_nowait(tensor)
        except Full:
            print('[LabelingModule] image_queue is full')

    def new_image(self, filename):
        tensor = self._img_to_tensor(filename)
        try:
            self.image_queue.put_nowait(tensor)
        except Full:
            print('[LabelingModule] image_queue is full')

    def _img_to_tensor(self, filename):
        img = image.load_img(filename, target_size=(48, 48))
        img_tensor = image.img_to_array(img)
        img_tensor = np.squeeze(img_tensor)
        img_tensor /= 255.                           # scale to [0, 1]
        img_tensor = img_tensor - img_tensor.mean()  # zero-center
        return img_tensor


def _predict(model, input_queue, output_queue, signal_queue):
    print('predict process started.')
    while True:
        # exit when a 'stop' signal arrives
        try:
            signal = signal_queue.get_nowait()
            if signal == 'stop':
                break
        except Empty:
            pass

        # wake up periodically so the signal queue is re-checked even when idle
        # (the original get(timeout=-1) would time out immediately)
        try:
            tensor = input_queue.get(timeout=1)
        except Empty:
            continue
        dat = model.predict(np.array([tensor]))
        # the SVHN model has six output heads, one per digit position
        output = [int(np.argmax(d)) for d in dat]
        try:
            output_queue.put_nowait(output)  # publish labels; drop if no consumer is reading
        except Full:
            pass
        print('[LabelingModule] predict result :', output)
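For reference, a minimal standalone driver for `LabelingModule` might look like the sketch below; it assumes `svhn_model.h5` sits in the working directory, `sample.png` is a hypothetical test image, and that `_predict` publishes each result to `label_queue` as in the code above:

from queue import Empty
from labeling_module import LabelingModule

lm = LabelingModule()
lm.run()                    # spawn the predict process
lm.new_image('sample.png')  # hypothetical test image; resized and normalized internally
try:
    digits = lm.label_queue.get(timeout=30)  # one argmax per SVHN output head
    print('predicted digits:', digits)
except Empty:
    print('no prediction arrived in time')
lm.signal_queue.put('stop')  # let the predict loop exit
lm.predict_process.join()
lm.close()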
Integrated Module/main.py
0 → 100644
import cv2
import numpy as np
import time

from labeling_module import LabelingModule

fname = "./croppedimg/"
index = 0
prevTime = 0

lm = LabelingModule()

def filter_img(img):
    # Inspect the average BGR values to decide whether the crop is the indoor tag we are looking for
    img = cv2.resize(img, (10, 10))
    first = [0, 0, 0]
    for x_loc in range(0, 10):
        for y_loc in range(0, 10):
            bgr_value = img[x_loc, y_loc]
            first = first + bgr_value  # numpy broadcasting accumulates per-channel sums
    first = first / 100  # average over the 100 pixels
    blue = 120 < first[0] < 200
    green = 120 < first[1] < 210
    red = 130 < first[2] < 230
    return blue and green and red
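Since the crop is resized to a 10x10 thumbnail, the loop in filter_img simply computes the mean BGR value; an equivalent vectorized form (a sketch with the same thresholds, filter_img_vectorized is a hypothetical name) would be:

def filter_img_vectorized(img):
    # mean B, G, R over all 100 pixels of the 10x10 thumbnail
    b, g, r = cv2.resize(img, (10, 10)).reshape(-1, 3).mean(axis=0)
    return 120 < b < 200 and 120 < g < 210 and 130 < r < 230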
def bboxes(inp, prevTime):
    # Receives a frame as its argument
    img = inp
    curTime = time.time()
    img_final = inp
    img2gray = cv2.cvtColor(inp, cv2.COLOR_BGR2GRAY)  # 8-bit grayscale image
    ret, mask = cv2.threshold(img2gray, 180, 255, cv2.THRESH_BINARY)  # threshold separates background from objects
    image_final = cv2.bitwise_and(img2gray, img2gray, mask=mask)
    ret, new_img = cv2.threshold(img_final, 180, 255, cv2.THRESH_BINARY)  # for black text, use cv2.THRESH_BINARY_INV
    newimg = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)  # convert back to grayscale
    # newimg = cv2.GaussianBlur(newimg, (3,3), 0)

    # remove noise from the image
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5,1))
    # to control the orientation of dilation: a larger x dilates more horizontally, a larger y more vertically
    # dilated = cv2.dilate(newimg, kernel, iterations=1)
    # erode = cv2.erode(newimg, kernel)
    contours, _ = cv2.findContours(newimg, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)  # CHAIN_APPROX_NONE returns every contour point
    for contour in contours:
        # get the rectangle bounding the contour
        [x, y, w, h] = cv2.boundingRect(contour)

        # discard small false positives that are not text (width/height limits)
        if w > 50 or h > 35 or w < 13:
            continue
        if h / w > 1.0 or w / h > 2.0:
            continue
        if h > 40 or w > 70:
            continue
        if y > 150:
            continue
        cropped = img_final[y:y + h, x:x + w]
        # draw a rectangle around the contour on the original image
        if filter_img(cropped):
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)
            cv2.putText(img, "cropped", (x - 50, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
            cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
            cropped = cv2.resize(cropped, (48, 48))
            # normalize the same way as LabelingModule._img_to_tensor before queueing
            cropped = cropped.astype(np.float32) / 255.
            cropped = cropped - cropped.mean()
            lm.new_tensor(cropped)
        else:
            continue
    img = cv2.resize(img, (720, 380))
    sec = curTime - prevTime
    prevTime = curTime
    try:
        fps = 1 / sec
    except ZeroDivisionError:
        fps = 0  # first frame, or two frames with identical timestamps
    # print("Time {0} ".format(sec))
    # print("Estimated fps {0} ".format(fps))
    str1 = "FPS : {0}".format(int(fps))
    cv2.putText(img, str1, (0, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)
    cv2.imshow('captcha_result', img)
    return prevTime

if __name__ == "__main__":
    lm.run()  # start the prediction process

    cap = cv2.VideoCapture(0)  # open the video source (camera 0)
    while cap.isOpened():
        ret, inp = cap.read()  # read a frame and hand it to bboxes
        if ret:  # success flag
            prevTime = bboxes(inp, prevTime)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("Terminate Process..")
            break
    cap.release()  # release the video source
    cv2.destroyAllWindows()

    lm.signal_queue.put('stop')  # tell the predict process to exit; otherwise join() blocks forever
    lm.predict_process.join()
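To try the pipeline, run `python main.py` from the `Integrated Module` directory with `svhn_model.h5` next to it, and press `q` in the preview window to stop. Note that main.py only feeds `image_queue`; nothing here consumes `label_queue`, so a consumer along the lines of the sketch after labeling_module.py above is still needed to act on the predictions.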