Showing 2 changed files with 269 additions and 67 deletions
@@ -48,14 +48,31 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {
-    "collapsed": true,
     "tags": [
      "parameters"
     ]
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "The method get_image_uri has been renamed in sagemaker>=2.\n",
+      "See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n",
+      "Defaulting to the only supported framework/algorithm version: 1. Ignoring framework/algorithm version: 1.\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CPU times: user 906 ms, sys: 140 ms, total: 1.05 s\n",
+      "Wall time: 10.3 s\n"
+     ]
+    }
+   ],
    "source": [
     "%%time\n",
     "import boto3\n",
@@ -66,7 +83,7 @@
     "\n",
     "role = get_execution_role()\n",
     "\n",
-    "bucket = sagemaker.Session().default_bucket()\n",
+    "bucket = 'deeplens-sagemaker-yogaproject'\n",
     "\n",
     "training_image = get_image_uri(boto3.Session().region_name, 'image-classification')"
    ]
@@ -81,39 +98,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 2,
+   "metadata": {},
    "outputs": [],
    "source": [
     "import os \n",
     "import urllib.request\n",
     "import boto3\n",
     "\n",
-    "def download(url):\n",
-    "    filename = url.split(\"/\")[-1]\n",
-    "    if not os.path.exists(filename):\n",
-    "        urllib.request.urlretrieve(url, filename)\n",
-    "\n",
-    "    \n",
-    "def upload_to_s3(channel, file):\n",
-    "    s3 = boto3.resource('s3')\n",
-    "    data = open(file, \"rb\")\n",
-    "    key = channel + '/' + file\n",
-    "    s3.Bucket(bucket).put_object(Key=key, Body=data)\n",
     "\n",
     "\n",
     "# caltech-256\n",
     "s3_train_key = \"image-classification-full-training/train\"\n",
     "s3_validation_key = \"image-classification-full-training/validation\"\n",
     "s3_train = 's3://{}/{}/'.format(bucket, s3_train_key)\n",
-    "s3_validation = 's3://{}/{}/'.format(bucket, s3_validation_key)\n",
-    "\n",
-    "download('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')\n",
-    "upload_to_s3(s3_train_key, 'caltech-256-60-train.rec')\n",
-    "download('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')\n",
-    "upload_to_s3(s3_validation_key, 'caltech-256-60-val.rec')"
+    "s3_validation = 's3://{}/{}/'.format(bucket, s3_validation_key)"
    ]
   },
   {
@@ -152,10 +151,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 3,
+   "metadata": {},
    "outputs": [],
    "source": [
     "# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200\n",
@@ -165,9 +162,9 @@
     "image_shape = \"3,224,224\"\n",
     "# we also need to specify the number of training samples in the training set\n",
     "# for caltech it is 15420\n",
-    "num_training_samples = \"15420\"\n",
+    "num_training_samples = \"600\"\n",
     "# specify the number of output classes\n",
-    "num_classes = \"257\"\n",
+    "num_classes = \"3\"\n",
     "# batch size for training\n",
     "mini_batch_size = \"64\"\n",
     "# number of epochs\n",
@@ -186,12 +183,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {
-    "collapsed": true,
     "scrolled": true
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Training job name: JOB--2020-11-10-08-44-40\n",
+      "\n",
+      "Input Data Location: {'S3DataType': 'S3Prefix', 'S3Uri': 's3://deeplens-sagemaker-yogaproject/image-classification-full-training/train/', 'S3DataDistributionType': 'FullyReplicated'}\n",
+      "CPU times: user 62.6 ms, sys: 4.15 ms, total: 66.8 ms\n",
+      "Wall time: 1.19 s\n"
+     ]
+    }
+   ],
    "source": [
     "%%time\n",
     "import time\n",
@@ -201,7 +209,7 @@
     "\n",
     "s3 = boto3.client('s3')\n",
     "# create unique job name\n",
-    "job_name_prefix = 'DEMO-imageclassification'\n",
+    "job_name_prefix = 'JOB'\n",
     "job_name = job_name_prefix + '-' + time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\n",
     "training_params = \\\n",
     "{\n",
@@ -268,11 +276,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Training job current status: InProgress\n",
+      "Training job ended with status: Completed\n"
+     ]
+    }
+   ],
    "source": [
     "# create the Amazon SageMaker training job\n",
     "sagemaker = boto3.client(service_name='sagemaker')\n",
@@ -297,11 +312,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Training job ended with status: Completed\n"
+     ]
+    }
+   ],
    "source": [
     "training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\n",
     "status = training_info['TrainingJobStatus']\n",
@@ -349,11 +370,30 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "The method get_image_uri has been renamed in sagemaker>=2.\n",
+      "See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n",
+      "Defaulting to the only supported framework/algorithm version: 1. Ignoring framework/algorithm version: 1.\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "DEMO-full-image-classification-model-2020-11-10-08-51-55\n",
+      "s3://deeplens-sagemaker-yogaproject/JOB/output/JOB--2020-11-10-08-44-40/output/model.tar.gz\n",
+      "arn:aws:sagemaker:us-east-1:304659765988:model/demo-full-image-classification-model-2020-11-10-08-51-55\n",
+      "CPU times: user 93.2 ms, sys: 9.06 ms, total: 102 ms\n",
+      "Wall time: 1.54 s\n"
+     ]
+    }
+   ],
    "source": [
     "%%time\n",
     "import boto3\n",
@@ -410,24 +450,31 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [],
    "source": [
-    "batch_input = 's3://{}/image-classification-full-training/test/'.format(bucket)\n",
-    "test_images = '/tmp/images/008.bathtub'\n",
-    "\n",
-    "!aws s3 cp $test_images $batch_input --recursive --quiet "
+    "batch_input = 's3://{}/image-classification-full-training/test/'.format(bucket)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Transform job name: image-classification-models-2020-11-10-08-52-35\n",
+      "\n",
+      "Input Data Location: s3://deeplens-sagemaker-yogaproject/image-classification-full-training/validation/\n"
+     ]
+    }
+   ],
    "source": [
     "timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\n",
-    "batch_job_name = \"image-classification-model\" + timestamp\n",
+    "batch_job_name = \"image-classification-models\" + timestamp\n",
     "request = \\\n",
     "{\n",
     "    \"TransformJobName\": batch_job_name,\n",
@@ -450,7 +497,7 @@
     "        \"CompressionType\": \"None\"\n",
     "    },\n",
     "    \"TransformResources\": {\n",
-    "        \"InstanceType\": \"ml.p2.xlarge\",\n",
+    "        \"InstanceType\": \"ml.c5.xlarge\",\n",
     "        \"InstanceCount\": 1\n",
     "    }\n",
     "}\n",
@@ -461,9 +508,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 10,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Created Transform job with name: image-classification-models-2020-11-10-08-52-35\n",
+      "Transform job ended with status: Completed\n"
+     ]
+    }
+   ],
    "source": [
     "sagemaker = boto3.client('sagemaker')\n",
     "sagemaker.create_transform_job(**request)\n",
@@ -480,7 +536,7 @@
     "        message = response['FailureReason']\n",
     "        print('Transform failed with the following error: {}'.format(message))\n",
     "        raise Exception('Transform job failed') \n",
-    "  time.sleep(30) "
+    "    time.sleep(30) "
    ]
   },
   {
@@ -492,17 +548,54 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 11,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Sample inputs: ['image-classification-full-training/test/', 'image-classification-full-training/test/1.one-minute-yoga-at-home-tree-pose-video-beginner.jpg']\n",
+      "Sample output: ['image-classification-models-2020-11-10-08-52-35/output/1.one-minute-yoga-at-home-tree-pose-video-beginner.jpg.out', 'image-classification-models-2020-11-10-08-52-35/output/10.maxresdefault.jpg.out']\n",
+      "Result: label - plank, probability - 0.9452986717224121\n",
+      "Result: label - plank, probability - 0.5059212446212769\n",
+      "Result: label - plank, probability - 0.908556342124939\n",
+      "Result: label - plank, probability - 0.9969107508659363\n",
+      "Result: label - tree, probability - 0.7288743853569031\n",
+      "Result: label - plank, probability - 0.589160680770874\n",
+      "Result: label - tree, probability - 0.7094720602035522\n",
+      "Result: label - tree, probability - 0.6348884105682373\n",
+      "Result: label - tree, probability - 0.9513751864433289\n",
+      "Result: label - tree, probability - 0.9830145835876465\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[('plank', 0.9452986717224121),\n",
+       " ('plank', 0.5059212446212769),\n",
+       " ('plank', 0.908556342124939),\n",
+       " ('plank', 0.9969107508659363),\n",
+       " ('tree', 0.7288743853569031),\n",
+       " ('plank', 0.589160680770874),\n",
+       " ('tree', 0.7094720602035522),\n",
+       " ('tree', 0.6348884105682373),\n",
+       " ('tree', 0.9513751864433289),\n",
+       " ('tree', 0.9830145835876465)]"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "from urllib.parse import urlparse\n",
     "import json\n",
     "import numpy as np\n",
     "\n",
     "s3_client = boto3.client('s3')\n",
-    "object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']\n",
-    "\n",
+    "object_categories = ['tree', 'plank']\n",
     "def list_objects(s3_client, bucket, prefix):\n",
     "    response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)\n",
     "    objects = [content['Key'] for content in response['Contents']]\n",
@@ -782,7 +875,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.3"
+   "version": "3.6.10"
   }
  },
  "nbformat": 4,
Code/yogaprojectlambda.py 0 → 100644
+import os
+import greengrasssdk
+from threading import Timer
+import time
+import awscam
+import cv2
+import mo
+from threading import Thread
+
+# Creating a greengrass core sdk client
+client = greengrasssdk.client('iot-data')
+
+# The information exchanged between IoT and cloud has
+# a topic and a message body.
+# This is the topic that this code uses to send messages to cloud
+iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
+jpeg = None
+Write_To_FIFO = True
+
+class FIFO_Thread(Thread):
+    def __init__(self):
+        ''' Constructor. '''
+        Thread.__init__(self)
+
+    def run(self):
+        fifo_path = "/tmp/results.mjpeg"
+        if not os.path.exists(fifo_path):
+            os.mkfifo(fifo_path)
+        # Opening the FIFO blocks until a reader (e.g. mplayer) attaches
+        f = open(fifo_path, 'w')
+        client.publish(topic=iotTopic, payload="Opened Pipe")
+        while Write_To_FIFO:
+            try:
+                f.write(jpeg.tobytes())
+            except (IOError, AttributeError):
+                # jpeg is None until the first frame has been encoded
+                continue
+
+def greengrass_infinite_infer_run():
+    try:
+        input_width = 224
+        input_height = 224
+        model_name = "image-classification"
+        # The '--epoch' aux input must match the checkpoint number of the
+        # trained model artifact being optimized (epoch 2 in this case)
+        error, model_path = mo.optimize(model_name, input_width, input_height,
+                                        aux_inputs={'--epoch': 2, '--precision': 'FP32'})
+        # Load model to GPU (use {"GPU": 0} for CPU)
+        mcfg = {"GPU": 1}
+        model = awscam.Model(model_path, mcfg)
+
+        client.publish(topic=iotTopic, payload="Model loaded")
+        model_type = "classification"
+
+        with open('caltech256_labels.txt', 'r') as f:
+            labels = [l.rstrip() for l in f]
+
+        topk = 2
+        results_thread = FIFO_Thread()
+        results_thread.start()
+
+        # Send a starting message to the IoT console
+        client.publish(topic=iotTopic, payload="Inference is starting")
+
+        doInfer = True
+        while doInfer:
+            # Get a frame from the video stream
+            ret, frame = awscam.getLastFrame()
+            # Raise an exception if failing to get a frame
+            if ret == False:
+                raise Exception("Failed to get frame from the stream")
+
+            # Resize frame to fit model input requirement
+            frameResize = cv2.resize(frame, (input_width, input_height))
+
+            # Run model inference on the resized frame
+            inferOutput = model.doInference(frameResize)
+
+            # Output inference result to the fifo file so it can be viewed with mplayer
+            parsed_results = model.parseResult(model_type, inferOutput)
+            top_k = parsed_results[model_type][0:topk]
+            msg = '{'
+            prob_num = 0
+            for obj in top_k:
+                if prob_num == topk - 1:
+                    msg += '"{}": {:.2f}'.format(labels[obj["label"]], obj["prob"] * 100)
+                else:
+                    msg += '"{}": {:.2f},'.format(labels[obj["label"]], obj["prob"] * 100)
+                prob_num += 1
+            msg += "}"
+
+            client.publish(topic=iotTopic, payload=msg)
+            # Overlay the top label on the frame when confidence exceeds 65%
+            if top_k[0]["prob"] * 100 > 65:
+                cv2.putText(frame, labels[top_k[0]["label"]] + ' ' + str(top_k[0]["prob"] * 100),
+                            (0, 22), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 165, 20), 4)
+            global jpeg
+            ret, jpeg = cv2.imencode('.jpg', frame)
+
+    except Exception as e:
+        msg = "myModel Lambda failed: " + str(e)
+        client.publish(topic=iotTopic, payload=msg)
+
+    # Asynchronously schedule this function to be run again in 15 seconds
+    Timer(15, greengrass_infinite_infer_run).start()
+
+
+# Execute the function above
+greengrass_infinite_infer_run()
+
+
+# This is a dummy handler and will not be invoked
+# Instead the code above will be executed in an infinite loop for our example
+def function_handler(event, context):
+    return
\ No newline at end of file
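The inference loop above assembles its IoT payload by hand with string concatenation. The same message can be produced with json.dumps; a sketch assuming top_k and labels have the shapes the Lambda uses (the helper name format_topk_message is hypothetical, not part of the committed code).

import json

def format_topk_message(top_k, labels):
    # Same payload the loop builds by hand: {"<label>": <prob as a percentage>, ...}
    return json.dumps({labels[obj["label"]]: round(obj["prob"] * 100, 2)
                       for obj in top_k})

# Example: labels = ['tree', 'plank'], top_k = [{'label': 0, 'prob': 0.9513}]
# -> '{"tree": 95.13}'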