Showing 2 changed files with 109 additions and 0 deletions
This diff is collapsed. Click to expand it.
Code/yogaprojectlambda.py
0 → 100644
1 | +import os | ||
2 | +import greengrasssdk | ||
3 | +from threading import Timer | ||
4 | +import time | ||
5 | +import awscam | ||
6 | +import cv2 | ||
7 | +import mo | ||
8 | +from threading import Thread | ||
9 | + | ||
# Creating a greengrass core sdk client
client = greengrasssdk.client('iot-data')

# The information exchanged between IoT and cloud has
# a topic and a message body.
# This is the topic that this code uses to send messages to cloud
iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
# Latest JPEG-encoded frame produced by the inference loop; read by FIFO_Thread.
jpeg = None
# Keeps FIFO_Thread.run()'s write loop alive; never cleared in this file.
Write_To_FIFO = True
19 | + | ||
class FIFO_Thread(Thread):
    """Background thread that streams encoded JPEG frames to a named pipe.

    Repeatedly writes the module-global `jpeg` buffer (produced by the
    inference loop via cv2.imencode) to /tmp/results.mjpeg so the annotated
    feed can be viewed locally (e.g. with mplayer).
    """

    def __init__(self):
        ''' Constructor. '''
        Thread.__init__(self)

    def run(self):
        fifo_path = "/tmp/results.mjpeg"
        # Create the FIFO only if it does not already exist; os.mkfifo
        # raises if the path is present.
        if not os.path.exists(fifo_path):
            os.mkfifo(fifo_path)
        # Binary mode: cv2.imencode yields raw JPEG bytes, not text.
        # (The original 'w' text mode raises TypeError on bytes in Python 3.)
        f = open(fifo_path, 'wb')
        client.publish(topic=iotTopic, payload="Opened Pipe")
        try:
            while Write_To_FIFO:
                # `jpeg` is None until the first frame has been encoded;
                # calling .tobytes() on it would raise AttributeError, which
                # the IOError handler below would not catch.
                if jpeg is None:
                    time.sleep(0.01)  # avoid a busy spin before first frame
                    continue
                try:
                    f.write(jpeg.tobytes())
                except IOError:
                    # The reader side of the pipe may not be attached yet.
                    continue
        finally:
            f.close()
36 | + | ||
def greengrass_infinite_infer_run():
    """Run the image-classification inference loop on the DeepLens camera.

    Loads the optimized model, classifies every camera frame, publishes the
    top-k label probabilities to `iotTopic`, and stores an annotated JPEG of
    the frame in the module-global `jpeg` for the FIFO preview thread.

    On any failure the error message is published to IoT and the whole
    function is rescheduled to run again in 15 seconds.
    """
    global jpeg  # written at the end of each loop; consumed by FIFO_Thread
    try:
        # Input dimensions expected by the image-classification network.
        input_width = 224
        input_height = 224
        model_name = "image-classification"
        # '--epoch' selects which training checkpoint to optimize/load
        # (checkpoint 2 here); '--precision' keeps FP32 weights.
        error, model_path = mo.optimize(
            model_name, input_width, input_height,
            aux_inputs={'--epoch': 2, '--precision': 'FP32'})
        # Load model to GPU (use {"GPU": 0} for CPU)
        mcfg = {"GPU": 1}
        model = awscam.Model(model_path, mcfg)

        client.publish(topic=iotTopic, payload="Model loaded")
        model_type = "classification"

        # Human-readable class names, one per line, indexed by label id.
        with open('caltech256_labels.txt', 'r') as f:
            labels = [l.rstrip() for l in f]

        topk = 2  # number of highest-probability classes to report
        results_thread = FIFO_Thread()
        results_thread.start()

        # Send a starting message to IoT console
        client.publish(topic=iotTopic, payload="Inference is starting")

        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")

            # Resize frame to fit model input requirement
            frameResize = cv2.resize(frame, (input_width, input_height))

            # Run model inference on the resized frame
            inferOutput = model.doInference(frameResize)

            # Keep only the topk most probable classes.
            parsed_results = model.parseResult(model_type, inferOutput)
            top_k = parsed_results[model_type][0:topk]

            # Build a JSON-like payload: {"label": prob%, ...}
            # (join handles the trailing comma the original tracked by hand).
            msg = '{' + ','.join(
                '"{}": {:.2f}'.format(labels[obj["label"]], obj["prob"] * 100)
                for obj in top_k) + '}'

            client.publish(topic=iotTopic, payload=msg)
            # Overlay the top label on the frame when reasonably confident.
            if top_k[0]["prob"] * 100 > 65:
                cv2.putText(frame,
                            labels[top_k[0]["label"]] + ' ' + str(top_k[0]["prob"] * 100),
                            (0, 22), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (255, 165, 20), 4)
            # Publish the annotated frame to the FIFO preview thread.
            ret, jpeg = cv2.imencode('.jpg', frame)

    except Exception as e:
        msg = "myModel Lambda failed: " + str(e)
        client.publish(topic=iotTopic, payload=msg)

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()
100 | + | ||
101 | + | ||
# Execute the function above at module import time — the Greengrass
# long-lived lambda pattern runs this loop outside the handler.
greengrass_infinite_infer_run()
104 | + | ||
105 | + | ||
# This is a dummy handler and will not be invoked
# Instead the code above will be executed in an infinite loop for our example
def function_handler(event, context):
    """No-op Lambda entry point; the real work runs in the module-level loop."""
    return None
... | \ No newline at end of file | ... | \ No newline at end of file |
-
Please register or login to post a comment