최지우

add Lambda code & SageMaker code
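
Note: this commit removes the caltech-256 download/upload helper cells, so the training and validation RecordIO files for the yoga-pose dataset are assumed to already exist under s3://deeplens-sagemaker-yogaproject/image-classification-full-training/train/ and .../validation/. A minimal sketch of staging them with boto3, mirroring the upload_to_s3 helper that the diff below removes (the local file names yoga_train.rec and yoga_val.rec are assumptions for illustration, not part of this commit):

import boto3

bucket = 'deeplens-sagemaker-yogaproject'

def upload_to_s3(channel, file):
    # same behavior as the notebook helper removed in this commit:
    # store the local file under s3://<bucket>/<channel>/<file>
    s3 = boto3.resource('s3')
    with open(file, 'rb') as data:
        s3.Bucket(bucket).put_object(Key=channel + '/' + file, Body=data)

# assumed local RecordIO files for the yoga dataset
upload_to_s3('image-classification-full-training/train', 'yoga_train.rec')
upload_to_s3('image-classification-full-training/validation', 'yoga_val.rec')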

@@ -48,14 +48,31 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"collapsed": true,
"tags": [
"parameters"
]
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"The method get_image_uri has been renamed in sagemaker>=2.\n",
"See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n",
"Defaulting to the only supported framework/algorithm version: 1. Ignoring framework/algorithm version: 1.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 906 ms, sys: 140 ms, total: 1.05 s\n",
"Wall time: 10.3 s\n"
]
}
],
"source": [
"%%time\n",
"import boto3\n",
@@ -66,7 +83,7 @@
"\n",
"role = get_execution_role()\n",
"\n",
"bucket = sagemaker.Session().default_bucket()\n",
"bucket = 'deeplens-sagemaker-yogaproject'\n",
"\n",
"training_image = get_image_uri(boto3.Session().region_name, 'image-classification')"
]
@@ -81,39 +98,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import os \n",
"import urllib.request\n",
"import boto3\n",
"\n",
"def download(url):\n",
" filename = url.split(\"/\")[-1]\n",
" if not os.path.exists(filename):\n",
" urllib.request.urlretrieve(url, filename)\n",
"\n",
" \n",
"def upload_to_s3(channel, file):\n",
" s3 = boto3.resource('s3')\n",
" data = open(file, \"rb\")\n",
" key = channel + '/' + file\n",
" s3.Bucket(bucket).put_object(Key=key, Body=data)\n",
"\n",
"\n",
"# caltech-256\n",
"s3_train_key = \"image-classification-full-training/train\"\n",
"s3_validation_key = \"image-classification-full-training/validation\"\n",
"s3_train = 's3://{}/{}/'.format(bucket, s3_train_key)\n",
"s3_validation = 's3://{}/{}/'.format(bucket, s3_validation_key)\n",
"\n",
"download('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')\n",
"upload_to_s3(s3_train_key, 'caltech-256-60-train.rec')\n",
"download('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')\n",
"upload_to_s3(s3_validation_key, 'caltech-256-60-val.rec')"
"s3_validation = 's3://{}/{}/'.format(bucket, s3_validation_key)"
]
},
{
@@ -152,10 +151,8 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200\n",
@@ -165,9 +162,9 @@
"image_shape = \"3,224,224\"\n",
"# we also need to specify the number of training samples in the training set\n",
"# for caltech it is 15420\n",
"num_training_samples = \"15420\"\n",
"num_training_samples = \"600\"\n",
"# specify the number of output classes\n",
"num_classes = \"257\"\n",
"num_classes = \"3\"\n",
"# batch size for training\n",
"mini_batch_size = \"64\"\n",
"# number of epochs\n",
@@ -186,12 +183,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {
"collapsed": true,
"scrolled": true
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training job name: JOB--2020-11-10-08-44-40\n",
"\n",
"Input Data Location: {'S3DataType': 'S3Prefix', 'S3Uri': 's3://deeplens-sagemaker-yogaproject/image-classification-full-training/train/', 'S3DataDistributionType': 'FullyReplicated'}\n",
"CPU times: user 62.6 ms, sys: 4.15 ms, total: 66.8 ms\n",
"Wall time: 1.19 s\n"
]
}
],
"source": [
"%%time\n",
"import time\n",
@@ -201,7 +209,7 @@
"\n",
"s3 = boto3.client('s3')\n",
"# create unique job name\n",
"job_name_prefix = 'DEMO-imageclassification'\n",
"job_name_prefix = 'JOB'\n",
"job_name = job_name_prefix + '-' + time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\n",
"training_params = \\\n",
"{\n",
@@ -268,11 +276,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training job current status: InProgress\n",
"Training job ended with status: Completed\n"
]
}
],
"source": [
"# create the Amazon SageMaker training job\n",
"sagemaker = boto3.client(service_name='sagemaker')\n",
@@ -297,11 +312,17 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training job ended with status: Completed\n"
]
}
],
"source": [
"training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\n",
"status = training_info['TrainingJobStatus']\n",
@@ -349,11 +370,30 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"The method get_image_uri has been renamed in sagemaker>=2.\n",
"See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n",
"Defaulting to the only supported framework/algorithm version: 1. Ignoring framework/algorithm version: 1.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"DEMO-full-image-classification-model-2020-11-10-08-51-55\n",
"s3://deeplens-sagemaker-yogaproject/JOB/output/JOB--2020-11-10-08-44-40/output/model.tar.gz\n",
"arn:aws:sagemaker:us-east-1:304659765988:model/demo-full-image-classification-model-2020-11-10-08-51-55\n",
"CPU times: user 93.2 ms, sys: 9.06 ms, total: 102 ms\n",
"Wall time: 1.54 s\n"
]
}
],
"source": [
"%%time\n",
"import boto3\n",
@@ -410,24 +450,31 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"batch_input = 's3://{}/image-classification-full-training/test/'.format(bucket)\n",
"test_images = '/tmp/images/008.bathtub'\n",
"\n",
"!aws s3 cp $test_images $batch_input --recursive --quiet "
"batch_input = 's3://{}/image-classification-full-training/test/'.format(bucket)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Transform job name: image-classification-models-2020-11-10-08-52-35\n",
"\n",
"Input Data Location: s3://deeplens-sagemaker-yogaproject/image-classification-full-training/validation/\n"
]
}
],
"source": [
"timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\n",
"batch_job_name = \"image-classification-model\" + timestamp\n",
"batch_job_name = \"image-classification-models\" + timestamp\n",
"request = \\\n",
"{\n",
" \"TransformJobName\": batch_job_name,\n",
@@ -450,7 +497,7 @@
" \"CompressionType\": \"None\"\n",
" },\n",
" \"TransformResources\": {\n",
" \"InstanceType\": \"ml.p2.xlarge\",\n",
" \"InstanceType\": \"ml.c5.xlarge\",\n",
" \"InstanceCount\": 1\n",
" }\n",
"}\n",
@@ -461,9 +508,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Created Transform job with name: image-classification-models-2020-11-10-08-52-35\n",
"Transform job ended with status: Completed\n"
]
}
],
"source": [
"sagemaker = boto3.client('sagemaker')\n",
"sagemaker.create_transform_job(**request)\n",
@@ -480,7 +536,7 @@
" message = response['FailureReason']\n",
" print('Transform failed with the following error: {}'.format(message))\n",
" raise Exception('Transform job failed') \n",
" time.sleep(30) "
" time.sleep(30) "
]
},
{
@@ -492,17 +548,54 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sample inputs: ['image-classification-full-training/test/', 'image-classification-full-training/test/1.one-minute-yoga-at-home-tree-pose-video-beginner.jpg']\n",
"Sample output: ['image-classification-models-2020-11-10-08-52-35/output/1.one-minute-yoga-at-home-tree-pose-video-beginner.jpg.out', 'image-classification-models-2020-11-10-08-52-35/output/10.maxresdefault.jpg.out']\n",
"Result: label - plank, probability - 0.9452986717224121\n",
"Result: label - plank, probability - 0.5059212446212769\n",
"Result: label - plank, probability - 0.908556342124939\n",
"Result: label - plank, probability - 0.9969107508659363\n",
"Result: label - tree, probability - 0.7288743853569031\n",
"Result: label - plank, probability - 0.589160680770874\n",
"Result: label - tree, probability - 0.7094720602035522\n",
"Result: label - tree, probability - 0.6348884105682373\n",
"Result: label - tree, probability - 0.9513751864433289\n",
"Result: label - tree, probability - 0.9830145835876465\n"
]
},
{
"data": {
"text/plain": [
"[('plank', 0.9452986717224121),\n",
" ('plank', 0.5059212446212769),\n",
" ('plank', 0.908556342124939),\n",
" ('plank', 0.9969107508659363),\n",
" ('tree', 0.7288743853569031),\n",
" ('plank', 0.589160680770874),\n",
" ('tree', 0.7094720602035522),\n",
" ('tree', 0.6348884105682373),\n",
" ('tree', 0.9513751864433289),\n",
" ('tree', 0.9830145835876465)]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from urllib.parse import urlparse\n",
"import json\n",
"import numpy as np\n",
"\n",
"s3_client = boto3.client('s3')\n",
"object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']\n",
"\n",
"object_categories = ['tree', 'plank']\n",
"def list_objects(s3_client, bucket, prefix):\n",
" response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)\n",
" objects = [content['Key'] for content in response['Contents']]\n",
@@ -782,7 +875,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
"version": "3.6.10"
}
},
"nbformat": 4,
......
import os
import greengrasssdk
from threading import Timer
import time
import awscam
import cv2
import mo
from threading import Thread

# Creating a greengrass core sdk client
client = greengrasssdk.client('iot-data')

# The information exchanged between IoT and the cloud has
# a topic and a message body.
# This is the topic that this code uses to send messages to the cloud
iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])

jpeg = None
Write_To_FIFO = True

class FIFO_Thread(Thread):
    def __init__(self):
        ''' Constructor. '''
        Thread.__init__(self)

    def run(self):
        fifo_path = "/tmp/results.mjpeg"
        if not os.path.exists(fifo_path):
            os.mkfifo(fifo_path)
        f = open(fifo_path, 'w')
        client.publish(topic=iotTopic, payload="Opened Pipe")
        while Write_To_FIFO:
            try:
                f.write(jpeg.tobytes())
            except IOError as e:
                continue

def greengrass_infinite_infer_run():
    try:
        input_width = 224
        input_height = 224
        model_name = "image-classification"
        error, model_path = mo.optimize(model_name, input_width, input_height,
                                        aux_inputs={'--epoch': 2, '--precision': 'FP32'})
        # The '--epoch' aux input is equal to the number of training epochs; in this case, it is 2
        # Load model to GPU (use {"GPU": 0} for CPU)
        mcfg = {"GPU": 1}
        model = awscam.Model(model_path, mcfg)
        client.publish(topic=iotTopic, payload="Model loaded")
        model_type = "classification"
        with open('caltech256_labels.txt', 'r') as f:
            labels = [l.rstrip() for l in f]
        topk = 2
        results_thread = FIFO_Thread()
        results_thread.start()
        # Send a starting message to the IoT console
        client.publish(topic=iotTopic, payload="Inference is starting")
        doInfer = True
        while doInfer:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            # Raise an exception if failing to get a frame
            if ret == False:
                raise Exception("Failed to get frame from the stream")
            # Resize frame to fit model input requirement
            frameResize = cv2.resize(frame, (input_width, input_height))
            # Run model inference on the resized frame
            inferOutput = model.doInference(frameResize)
            # Output inference result to the fifo file so it can be viewed with mplayer
            parsed_results = model.parseResult(model_type, inferOutput)
            top_k = parsed_results[model_type][0:topk]
            msg = '{'
            prob_num = 0
            for obj in top_k:
                if prob_num == topk - 1:
                    msg += '"{}": {:.2f}'.format(labels[obj["label"]], obj["prob"] * 100)
                else:
                    msg += '"{}": {:.2f},'.format(labels[obj["label"]], obj["prob"] * 100)
                prob_num += 1
            msg += "}"
            client.publish(topic=iotTopic, payload=msg)
            if top_k[0]["prob"] * 100 > 65:
                cv2.putText(frame, labels[top_k[0]["label"]] + ' ' + str(top_k[0]["prob"] * 100),
                            (0, 22), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 165, 20), 4)
            global jpeg
            ret, jpeg = cv2.imencode('.jpg', frame)
    except Exception as e:
        msg = "myModel Lambda failed: " + str(e)
        client.publish(topic=iotTopic, payload=msg)

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()

# Execute the function above
greengrass_infinite_infer_run()

# This is a dummy handler and will not be invoked
# Instead the code above will be executed in an infinite loop for our example
def function_handler(event, context):
    return
\ No newline at end of file
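
The stderr text in the notebook cells above comes from calling the sagemaker v1 helper get_image_uri while sagemaker>=2 is installed. A rough sketch of the v2 equivalent, based on the renaming guide linked in the warning (untested in this environment; the version string '1' is assumed to be the built-in image-classification algorithm version):

import boto3
from sagemaker import image_uris

# sagemaker>=2 replacement for get_image_uri(boto3.Session().region_name, 'image-classification')
region = boto3.Session().region_name
training_image = image_uris.retrieve('image-classification', region=region, version='1')
print(training_image)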