# Author: 이재빈 (Lee Jae-bin)
#
# Door plate recognition module added: 05-28

# plaidml
# import plaidml.keras
# plaidml.keras.install_backend()
# packages
from keras.models import load_model
from keras.preprocessing import image
# import queue
import numpy as np
from queue import Full, Empty
from multiprocessing import Process, Queue
class LabelingModule:
    """Runs door-plate digit prediction in a separate worker process.

    Prepared 48x48 image tensors are pushed into ``image_queue``; the
    worker (``_predict``) consumes them, runs the SVHN-trained model and
    publishes results. ``signal_queue`` is the control channel ('stop').
    """

    def __init__(self):
        # Single SVHN-trained digit model (a second model slot was
        # commented out upstream).
        self.model2 = load_model('svhn_model.h5')
        self.image_queue = Queue(maxsize=3000)  # frames awaiting prediction
        self.label_queue = Queue(maxsize=10)    # prediction results
        self.signal_queue = Queue()             # control channel ('stop')
        self.predict_process = Process(
            target=_predict,
            args=(self.model2, self.image_queue,
                  self.label_queue, self.signal_queue))

    def run(self):
        """Start the background prediction process."""
        self.predict_process.start()

    def close(self):
        """Ask the worker to stop, then release the data queues."""
        # BUG FIX: the queues were previously closed without telling the
        # worker to stop, which could leave the process blocked forever.
        self.signal_queue.put('stop')
        self.image_queue.close()
        self.label_queue.close()

    def new_tensor(self, tensor):
        """Enqueue an already-prepared image tensor (non-blocking)."""
        self._enqueue(tensor)

    def new_image(self, filename):
        """Load *filename*, convert it to a tensor, and enqueue it."""
        self._enqueue(self._img_to_tensor(filename))

    def _enqueue(self, tensor):
        # BUG FIX: Queue.put() with the default block=True never raises
        # Full -- it blocks forever -- so the original except clause was
        # dead code. put_nowait() actually triggers the Full handler.
        try:
            self.image_queue.put_nowait(tensor)
        except Full:
            print('[LabelingModule] image_queue is full')

    def _img_to_tensor(self, filename):
        """Read an image file into a zero-mean float tensor in [0, 1]."""
        img = image.load_img(filename, target_size=(48, 48))
        img_tensor = image.img_to_array(img)
        img_tensor = np.squeeze(img_tensor)
        img_tensor /= 255.
        img_tensor = img_tensor - img_tensor.mean()
        return img_tensor
def _predict(model, input_queue, output_queue, signal_queue):
    """Worker-process loop: pull tensors, predict, publish results.

    Runs until the string 'stop' arrives on *signal_queue*. Each tensor
    taken from *input_queue* is fed to *model* (a six-output digit
    model); the per-head argmaxes are printed and pushed to
    *output_queue* best-effort.
    """
    print('predict process started.')
    while True:
        # Non-blocking check for a shutdown request.
        try:
            if signal_queue.get_nowait() == 'stop':
                break
        except Empty:
            pass
        # BUG FIX: the original used input_queue.get(timeout=-1); a
        # negative timeout makes get() raise Empty immediately, and the
        # uncaught Empty crashed the worker. Poll with a short timeout so
        # the stop signal stays responsive.
        try:
            tensor = input_queue.get(timeout=1)
        except Empty:
            continue
        dat = model.predict(np.array([tensor]))
        # Six digit heads; take the argmax of each.
        output = [int(np.argmax(head)) for head in dat[:6]]
        print('[LabelingModule] predict result :', output)
        # BUG FIX: output_queue was accepted but never used; publish the
        # result best-effort (drop it if consumers are not keeping up).
        try:
            output_queue.put_nowait(output)
        except Full:
            pass
import cv2
import numpy as np
import time
from multiprocessing import Queue
from labeling_module import LabelingModule
# Directory for cropped candidate images (kept for the commented-out paths below).
fname = "./croppedimg/"
index = 0  # NOTE(review): appears unused in this chunk -- confirm before removing
prevTime = 0  # timestamp of the previously processed frame, used for the FPS overlay
# Module-level side effect: constructing LabelingModule loads the Keras model
# from disk as soon as this script is imported.
lm = LabelingModule()
def filter_img(img):
    """Heuristic colour test: does *img* look like the indoor door tag?

    Downsamples to 10x10 and checks that the mean B/G/R values each fall
    inside a hand-tuned band. Returns True for a plausible tag.
    """
    img = cv2.resize(img, (10, 10))
    # Vectorized replacement for the original per-pixel double loop:
    # mean over all 100 pixels per channel (identical result, float math).
    mean_b, mean_g, mean_r = img.reshape(-1, 3).mean(axis=0)
    blue = 120 < mean_b < 200
    green = 120 < mean_g < 210
    red = 130 < mean_r < 230
    return blue and green and red
def bboxes(inp, prevTime):
    """Detect door-plate text candidates in one BGR camera frame.

    Thresholds the frame, finds contours, filters them by size/aspect/
    position and colour, enqueues 48x48 RGB crops to the labeling module,
    draws boxes plus an FPS overlay, shows the result, and returns the
    current timestamp to pass back in as *prevTime* on the next call.
    """
    img = inp
    curTime = time.time()
    # NOTE(review): img, img_final and inp all alias the SAME array, so
    # rectangles drawn below also appear in crops taken afterwards from
    # img_final. Preserved as-is -- confirm whether a copy was intended.
    img_final = inp
    # NOTE(review): the original also built a masked grayscale image
    # (cvtColor + threshold + bitwise_and) that was never used; that dead
    # code was removed. The threshold below runs on the *color* frame,
    # which may have been a typo for the masked image -- confirm.
    ret, new_img = cv2.threshold(img_final, 180, 255, cv2.THRESH_BINARY)
    newimg = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    contours, _ = cv2.findContours(newimg, cv2.RETR_CCOMP,
                                   cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # Hand-tuned size / aspect-ratio / position gates for plausible
        # plate text regions.
        if w > 50 or h > 35 or w < 13:
            continue
        if h / w > 1.0 or w / h > 2.0:
            continue
        if h > 40 or w > 70:
            continue
        if y > 150:
            continue
        cropped = img_final[y:y + h, x:x + w]
        if not filter_img(cropped):
            continue
        # Mark the accepted region on the displayed frame.
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)
        cv2.putText(img, "cropped", (x - 50, y - 10),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
        # Re-crop after drawing (original order preserved), convert to
        # RGB and resize to the model's 48x48 input.
        cropped = img_final[y:y + h, x:x + w]
        cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
        cropped = cv2.resize(cropped, (48, 48))
        lm.new_tensor(cropped)
    img = cv2.resize(img, (720, 380))
    sec = curTime - prevTime
    prevTime = curTime
    # BUG FIX: fps was left unbound when sec == 0, which made the
    # format() call below raise NameError. Default to 0 instead.
    try:
        fps = 1 / sec
    except ZeroDivisionError:
        fps = 0
    str1 = ("FPS : {0}".format(int(fps)))
    cv2.putText(img, str1, (0, 40),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)
    cv2.imshow('captcha_result', img)
    return prevTime
if __name__ == "__main__":
    # Start the prediction worker, then stream frames from the webcam.
    lm.predict_process.start()
    cap = cv2.VideoCapture(0)  # default camera device
    while cap.isOpened():
        ret, inp = cap.read()  # ret: success flag, inp: BGR frame
        if ret:
            prevTime = bboxes(inp, prevTime)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("Terminate Process..")
            break
    cap.release()
    # BUG FIX: the worker was joined without ever being told to stop,
    # which can hang forever; send the 'stop' signal first.
    lm.signal_queue.put('stop')
    lm.predict_process.join()