Showing 26 changed files with 1292 additions and 257 deletions
.travis.yml
0 → 100644

README.md
@@ -1,6 +1,10 @@
 kappa
 =====
 
+[![Build Status](https://travis-ci.org/garnaat/kappa.svg?branch=develop)](https://travis-ci.org/garnaat/kappa)
+
+[![Code Health](https://landscape.io/github/garnaat/kappa/develop/landscape.svg)](https://landscape.io/github/garnaat/kappa/develop)
+
 **Kappa** is a command line tool that (hopefully) makes it easier to
 deploy, update, and test functions for AWS Lambda.
 
@@ -27,19 +31,27 @@ your function on.
 
 Kappa is a command line tool.  The basic command format is:
 
-    kappa --config <path to config file> <command>
+    kappa <path to config file> <command> [optional command args]
 
 Where ``command`` is one of:
 
-* deploy - deploy the CloudFormation template containing the IAM roles and zip the function and upload it to AWS Lambda
-* test - send test data to the new Lambda function
-* tail - display the most recent log events for the function (remember that it can take several minutes before log events are available from CloudWatch)
-* add-event-source - hook up an event source to your Lambda function
-* delete - delete the CloudFormation stack containing the IAM roles and delete the Lambda function
+* deploy - deploy the CloudFormation template containing the IAM roles and zip
+  the function and upload it to AWS Lambda
+* test - send test data to the new Lambda function
+* tail - display the most recent log events for the function (remember that it
+  can take several minutes before log events are available from CloudWatch)
+* add-event-sources - hook up an event source to your Lambda function
+* delete - delete the CloudFormation stack containing the IAM roles and delete
+  the Lambda function
+* status - display summary information about functions, stacks, and event
+  sources related to your project
 
 The ``config file`` is a YAML format file containing all of the information
 about your Lambda function.
 
+If you use environment variables for your AWS credentials (as normally
+supported by boto), simply exclude the ``profile`` element from the YAML file.
+
 An example project based on a Kinesis stream can be found in
 [samples/kinesis](https://github.com/garnaat/kappa/tree/develop/samples/kinesis).
@@ -49,11 +61,11 @@ The basic workflow is:
 
 * Create your CloudFormation template with the execution and invocation roles
 * Create some sample data
 * Create the YAML config file with all of the information
-* Run ``kappa --config <path-to-config> deploy`` to create roles and upload function
-* Run ``kappa --config <path-to-config> test`` to invoke the function with test data
-* Run ``kappa --config <path-to-config> tail`` to view the functions output in CloudWatch logs
-* Run ``kappa --config <path-to-config> add-event-source`` to hook your function up to the event source
-* Run ``kappa --config <path-to-config> tail`` to see more output
+* Run ``kappa <path-to-config> deploy`` to create roles and upload the function
+* Run ``kappa <path-to-config> test`` to invoke the function with test data
+* Run ``kappa <path-to-config> tail`` to view the function's output in CloudWatch logs
+* Run ``kappa <path-to-config> add-event-sources`` to hook your function up to the event source
+* Run ``kappa <path-to-config> tail`` to see more output
 
 If you have to make changes in your function or in your IAM roles, simply run
 ``kappa deploy`` again and the changes will be uploaded as necessary.
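For orientation, everything the commands above need lives under three top-level keys in that YAML file: ``profile``/``region``, ``cloudformation``, and ``lambda``. A minimal sketch of the structure the parsed config exposes, using values from the ``samples/s3/config.yml`` added later in this change:

    import yaml

    # Sketch: load one of the sample configs added in this change and poke
    # at the structure it parses to (values from samples/s3/config.yml).
    with open('samples/s3/config.yml', 'rb') as fp:
        config = yaml.load(fp)

    assert config['cloudformation']['stack_name'] == 'TestS3'
    assert config['lambda']['handler'] == 'CreateThumbnail.handler'
    assert config['lambda']['event_sources'][0]['arn'] == 'arn:aws:s3:::test-1245812163'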
@@ -14,76 +14,100 @@
 import logging
 
 import click
-import yaml
 
-from kappa import Kappa
+from kappa.context import Context
 
-FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-
-
-def set_debug_logger(logger_names=['kappa'], stream=None):
-    """
-    Convenience function to quickly configure full debug output
-    to go to the console.
-    """
-    for logger_name in logger_names:
-        log = logging.getLogger(logger_name)
-        log.setLevel(logging.DEBUG)
-
-        ch = logging.StreamHandler(stream)
-        ch.setLevel(logging.DEBUG)
-
-        # create formatter
-        formatter = logging.Formatter(FmtString)
-
-        # add formatter to ch
-        ch.setFormatter(formatter)
-
-        # add ch to logger
-        log.addHandler(ch)
-
-
-@click.command()
-@click.option(
-    '--config',
-    help="Path to the Kappa config YAML file",
+
+@click.group()
+@click.argument(
+    'config',
     type=click.File('rb'),
     envvar='KAPPA_CONFIG',
-    default=None
 )
 @click.option(
     '--debug/--no-debug',
     default=False,
     help='Turn on debugging output'
 )
-@click.argument(
-    'command',
-    required=True,
-    type=click.Choice(['deploy', 'test', 'tail', 'add-event-source', 'delete'])
-)
-def main(config=None, debug=False, command=None):
-    if debug:
-        set_debug_logger()
-    config = yaml.load(config)
-    kappa = Kappa(config)
-    if command == 'deploy':
-        click.echo('Deploying ...')
-        kappa.deploy()
-    elif command == 'test':
-        click.echo('Sending test data ...')
-        kappa.test()
-        click.echo('...done')
-    elif command == 'tail':
-        kappa.tail()
-    elif command == 'delete':
-        click.echo('Deleting ...')
-        kappa.delete()
-        click.echo('...done')
-    elif command == 'add-event-source':
-        click.echo('Adding event source ...')
-        kappa.add_event_source()
-        click.echo('...done')
+@click.pass_context
+def cli(ctx, config=None, debug=False):
+    ctx.obj['debug'] = debug
+    ctx.obj['config'] = config
+
+
+@cli.command()
+@click.pass_context
+def deploy(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    click.echo('deploying...')
+    context.deploy()
+    click.echo('...done')
+
+
+@cli.command()
+@click.pass_context
+def test(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    click.echo('testing...')
+    context.test()
+    click.echo('...done')
+
+
+@cli.command()
+@click.pass_context
+def tail(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    click.echo('tailing logs...')
+    context.tail()
+    click.echo('...done')
+
+
+@cli.command()
+@click.pass_context
+def status(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    status = context.status()
+    click.echo(click.style('Stack', bold=True))
+    if status['stack']:
+        for stack in status['stack']['Stacks']:
+            line = '  {}: {}'.format(stack['StackId'], stack['StackStatus'])
+            click.echo(click.style(line, fg='green'))
+    else:
+        click.echo(click.style('  None', fg='green'))
+    click.echo(click.style('Function', bold=True))
+    if status['function']:
+        line = '  {}'.format(
+            status['function']['Configuration']['FunctionName'])
+        click.echo(click.style(line, fg='green'))
+    else:
+        click.echo(click.style('  None', fg='green'))
+    click.echo(click.style('Event Sources', bold=True))
+    if status['event_sources']:
+        for event_source in status['event_sources']:
+            if 'EventSource' in event_source:
+                line = '  {}: {}'.format(
+                    event_source['EventSource'], event_source['IsActive'])
+                click.echo(click.style(line, fg='green'))
+            else:
+                line = '  {}'.format(
+                    event_source['CloudFunctionConfiguration']['Id'])
+                click.echo(click.style(line, fg='green'))
+    else:
+        click.echo(click.style('  None', fg='green'))
+
+
+@cli.command()
+@click.pass_context
+def delete(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    click.echo('deleting...')
+    context.delete()
+    click.echo('...done')
+
+
+@cli.command()
+@click.pass_context
+def add_event_sources(ctx):
+    context = Context(ctx.obj['config'], ctx.obj['debug'])
+    click.echo('adding event sources...')
+    context.add_event_sources()
+    click.echo('...done')
 
 
 if __name__ == '__main__':
-    main()
+    cli(obj={})
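The group-based CLI above can be smoke-tested without installing the console script by using click's test runner. A sketch; ``kappa_cli`` is a placeholder import, since the diff does not show this file's module path:

    from click.testing import CliRunner

    from kappa_cli import cli  # placeholder name for the module shown above

    # obj={} must be passed explicitly, exactly as in the __main__ block,
    # because the group stores config/debug in ctx.obj.
    runner = CliRunner()
    result = runner.invoke(cli, ['config.yml', 'status'], obj={})
    print(result.output)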
kappa/__init__.py
@@ -11,192 +11,6 @@
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
 
-import logging
 import os
-import zipfile
-import time
 
-import botocore.session
-from botocore.exceptions import ClientError
-
-LOG = logging.getLogger(__name__)
-
-
-class Kappa(object):
-
-    completed_states = ('CREATE_COMPLETE', 'UPDATE_COMPLETE')
-
-    def __init__(self, config):
-        self.config = config
-        self.session = botocore.session.get_session()
-        self.session.profile = config['profile']
-        self.region = config['region']
-
-    def create_update_roles(self, stack_name, roles_path):
-        LOG.debug('create_update_policies: stack_name=%s', stack_name)
-        LOG.debug('create_update_policies: roles_path=%s', roles_path)
-        cfn = self.session.create_client('cloudformation', self.region)
-        # Does stack already exist?
-        try:
-            response = cfn.describe_stacks(StackName=stack_name)
-            LOG.debug('Stack %s already exists', stack_name)
-        except ClientError:
-            LOG.debug('Stack %s does not exist', stack_name)
-            response = None
-        template_body = open(roles_path).read()
-        if response:
-            try:
-                cfn.update_stack(
-                    StackName=stack_name, TemplateBody=template_body,
-                    Capabilities=['CAPABILITY_IAM'])
-            except ClientError, e:
-                LOG.debug(str(e))
-        else:
-            response = cfn.create_stack(
-                StackName=stack_name, TemplateBody=template_body,
-                Capabilities=['CAPABILITY_IAM'])
-        done = False
-        while not done:
-            time.sleep(1)
-            response = cfn.describe_stacks(StackName=stack_name)
-            status = response['Stacks'][0]['StackStatus']
-            LOG.debug('Stack status is: %s', status)
-            if status in self.completed_states:
-                done = True
-
-    def get_role_arn(self, role_name):
-        role_arn = None
-        cfn = self.session.create_client('cloudformation', self.region)
-        try:
-            resources = cfn.list_stack_resources(
-                StackName=self.config['cloudformation']['stack_name'])
-        except Exception:
-            LOG.exception('Unable to find role ARN: %s', role_name)
-        for resource in resources['StackResourceSummaries']:
-            if resource['LogicalResourceId'] == role_name:
-                iam = self.session.create_client('iam')
-                role = iam.get_role(RoleName=resource['PhysicalResourceId'])
-                role_arn = role['Role']['Arn']
-                LOG.debug('role_arn: %s', role_arn)
-        return role_arn
-
-    def delete_roles(self, stack_name):
-        LOG.debug('delete_roles: stack_name=%s', stack_name)
-        cfn = self.session.create_client('cloudformation', self.region)
-        try:
-            cfn.delete_stack(StackName=stack_name)
-        except Exception:
-            LOG.exception('Unable to delete stack: %s', stack_name)
-
-    def _zip_lambda_dir(self, zipfile_name, lambda_dir):
-        LOG.debug('_zip_lambda_dir: lambda_dir=%s', lambda_dir)
-        LOG.debug('zipfile_name=%s', zipfile_name)
-        relroot = os.path.abspath(os.path.join(lambda_dir, os.pardir))
-        with zipfile.ZipFile(zipfile_name, 'w') as zf:
-            for root, dirs, files in os.walk(lambda_dir):
-                zf.write(root, os.path.relpath(root, relroot))
-                for file in files:
-                    filename = os.path.join(root, file)
-                    if os.path.isfile(filename):
-                        arcname = os.path.join(
-                            os.path.relpath(root, relroot), file)
-                        zf.write(filename, arcname)
-
-    def _zip_lambda_file(self, zipfile_name, lambda_file):
-        LOG.debug('_zip_lambda_file: lambda_file=%s', lambda_file)
-        LOG.debug('zipfile_name=%s', zipfile_name)
-        with zipfile.ZipFile(zipfile_name, 'w') as zf:
-            zf.write(lambda_file)
-
-    def zip_lambda_function(self, zipfile_name, lambda_fn):
-        if os.path.isdir(lambda_fn):
-            self._zip_lambda_dir(zipfile_name, lambda_fn)
-        else:
-            self._zip_lambda_file(zipfile_name, lambda_fn)
-
-    def upload_lambda_function(self, zip_file):
-        LOG.debug('uploading %s', zip_file)
-        lambda_svc = self.session.create_client('lambda', self.region)
-        with open(zip_file, 'rb') as fp:
-            exec_role = self.get_role_arn(
-                self.config['cloudformation']['exec_role'])
-            try:
-                response = lambda_svc.upload_function(
-                    FunctionName=self.config['lambda']['name'],
-                    FunctionZip=fp,
-                    Runtime=self.config['lambda']['runtime'],
-                    Role=exec_role,
-                    Handler=self.config['lambda']['handler'],
-                    Mode=self.config['lambda']['mode'],
-                    Description=self.config['lambda']['description'],
-                    Timeout=self.config['lambda']['timeout'],
-                    MemorySize=self.config['lambda']['memory_size'])
-                LOG.debug(response)
-            except Exception:
-                LOG.exception('Unable to upload zip file')
-
-    def delete_lambda_function(self, function_name):
-        LOG.debug('deleting function %s', function_name)
-        lambda_svc = self.session.create_client('lambda', self.region)
-        response = lambda_svc.delete_function(FunctionName=function_name)
-        LOG.debug(response)
-        return response
-
-    def _invoke_asynch(self, data_file):
-        LOG.debug('_invoke_async %s', data_file)
-        with open(data_file) as fp:
-            lambda_svc = self.session.create_client('lambda', self.region)
-            response = lambda_svc.invoke_async(
-                FunctionName=self.config['lambda']['name'],
-                InvokeArgs=fp)
-        LOG.debug(response)
-
-    def _tail(self, function_name):
-        LOG.debug('tailing function: %s', function_name)
-        log_svc = self.session.create_client('logs', self.region)
-        log_group_name = '/aws/lambda/%s' % function_name
-        latest_stream = None
-        response = log_svc.describe_log_streams(logGroupName=log_group_name)
-        for stream in response['logStreams']:
-            if not latest_stream:
-                latest_stream = stream
-            elif stream['lastEventTimestamp'] > latest_stream['lastEventTimestamp']:
-                latest_stream = stream
-        response = log_svc.get_log_events(
-            logGroupName=log_group_name,
-            logStreamName=latest_stream['logStreamName'])
-        for log_event in response['events']:
-            print('%s: %s' % (log_event['timestamp'], log_event['message']))
-
-    def add_event_source(self):
-        lambda_svc = self.session.create_client('lambda', self.region)
-        try:
-            invoke_role = self.get_role_arn(
-                self.config['cloudformation']['invoke_role'])
-            response = lambda_svc.add_event_source(
-                FunctionName=self.config['lambda']['name'],
-                Role=invoke_role,
-                EventSource=self.config['lambda']['event_source'],
-                BatchSize=self.config['lambda'].get('batch_size', 100))
-            LOG.debug(response)
-        except Exception:
-            LOG.exception('Unable to add event source')
-
-    def deploy(self):
-        self.create_update_roles(
-            self.config['cloudformation']['stack_name'],
-            self.config['cloudformation']['template'])
-        self.zip_lambda_function(
-            self.config['lambda']['zipfile_name'],
-            self.config['lambda']['path'])
-        self.upload_lambda_function(self.config['lambda']['zipfile_name'])
-
-    def test(self):
-        self._invoke_asynch(self.config['lambda']['test_data'])
-
-    def tail(self):
-        self._tail(self.config['lambda']['name'])
-
-    def delete(self):
-        self.delete_roles(self.config['cloudformation']['stack_name'])
-        self.delete_lambda_function(self.config['lambda']['name'])
+__version__ = open(os.path.join(os.path.dirname(__file__), '_version')).read()
kappa/aws.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import botocore.session
+
+
+class __AWS(object):
+
+    def __init__(self, profile=None, region=None):
+        self._client_cache = {}
+        self._session = botocore.session.get_session()
+        self._session.profile = profile
+        self._region = region
+
+    def create_client(self, client_name):
+        if client_name not in self._client_cache:
+            self._client_cache[client_name] = self._session.create_client(
+                client_name, self._region)
+        return self._client_cache[client_name]
+
+
+__Singleton_AWS = None
+
+
+def get_aws(context):
+    global __Singleton_AWS
+    if __Singleton_AWS is None:
+        __Singleton_AWS = __AWS(context.profile, context.region)
+    return __Singleton_AWS
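This module gives every other new module a single cached botocore client per service. Usage is one call (a sketch, where ``context`` is any object exposing ``profile`` and ``region``, as ``Context`` below does):

    import kappa.aws

    # First call builds the client; later calls for the same service name
    # return the identical cached object.
    aws = kappa.aws.get_aws(context)
    lambda_svc = aws.create_client('lambda')
    assert aws.create_client('lambda') is lambda_svc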
kappa/context.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import logging
+import yaml
+
+import kappa.function
+import kappa.event_source
+import kappa.stack
+
+LOG = logging.getLogger(__name__)
+
+DebugFmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+InfoFmtString = '\t%(message)s'
+
+
+class Context(object):
+
+    def __init__(self, config_file, debug=False):
+        if debug:
+            self.set_logger('kappa', logging.DEBUG)
+        else:
+            self.set_logger('kappa', logging.INFO)
+        self.config = yaml.load(config_file)
+        self._stack = kappa.stack.Stack(
+            self, self.config['cloudformation'])
+        self.function = kappa.function.Function(
+            self, self.config['lambda'])
+        self.event_sources = []
+        self._create_event_sources()
+
+    @property
+    def profile(self):
+        return self.config.get('profile', None)
+
+    @property
+    def region(self):
+        return self.config.get('region', None)
+
+    @property
+    def cfn_config(self):
+        return self.config.get('cloudformation', None)
+
+    @property
+    def lambda_config(self):
+        return self.config.get('lambda', None)
+
+    @property
+    def exec_role_arn(self):
+        return self._stack.exec_role_arn
+
+    @property
+    def invoke_role_arn(self):
+        return self._stack.invoke_role_arn
+
+    def debug(self):
+        self.set_logger('kappa', logging.DEBUG)
+
+    def set_logger(self, logger_name, level=logging.INFO):
+        """
+        Convenience function to quickly configure full debug output
+        to go to the console.
+        """
+        log = logging.getLogger(logger_name)
+        log.setLevel(level)
+
+        ch = logging.StreamHandler(None)
+        ch.setLevel(level)
+
+        # create formatter
+        if level == logging.INFO:
+            formatter = logging.Formatter(InfoFmtString)
+        else:
+            formatter = logging.Formatter(DebugFmtString)
+
+        # add formatter to ch
+        ch.setFormatter(formatter)
+
+        # add ch to logger
+        log.addHandler(ch)
+
+    def _create_event_sources(self):
+        for event_source_cfg in self.config['lambda']['event_sources']:
+            _, _, svc, _ = event_source_cfg['arn'].split(':', 3)
+            if svc == 'kinesis':
+                self.event_sources.append(
+                    kappa.event_source.KinesisEventSource(
+                        self, event_source_cfg))
+            elif svc == 's3':
+                self.event_sources.append(kappa.event_source.S3EventSource(
+                    self, event_source_cfg))
+            else:
+                msg = 'Unsupported event source: %s' % event_source_cfg['arn']
+                raise ValueError(msg)
+
+    def add_event_sources(self):
+        for event_source in self.event_sources:
+            event_source.add(self.function)
+
+    def deploy(self):
+        self._stack.update()
+        self.function.upload()
+
+    def test(self):
+        self.function.test()
+
+    def tail(self):
+        return self.function.tail()
+
+    def delete(self):
+        self._stack.delete()
+        self.function.delete()
+        for event_source in self.event_sources:
+            event_source.remove(self.function)
+
+    def status(self):
+        status = {}
+        status['stack'] = self._stack.status()
+        status['function'] = self.function.status()
+        status['event_sources'] = []
+        for event_source in self.event_sources:
+            status['event_sources'].append(event_source.status(self.function))
+        return status
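Context is now the one object the CLI drives, so the same operations work from a Python shell (a sketch, assuming a config file shaped like the samples in this change and valid AWS credentials):

    from kappa.context import Context

    # debug=True routes the 'kappa' logger to DEBUG via set_logger() above.
    with open('config.yml', 'rb') as fp:
        ctx = Context(fp, debug=True)
    ctx.deploy()
    print(ctx.status())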
kappa/event_source.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import logging
+
+from botocore.exceptions import ClientError
+
+import kappa.aws
+
+LOG = logging.getLogger(__name__)
+
+
+class EventSource(object):
+
+    def __init__(self, context, config):
+        self._context = context
+        self._config = config
+
+    @property
+    def arn(self):
+        return self._config['arn']
+
+    @property
+    def batch_size(self):
+        return self._config.get('batch_size', 100)
+
+
+class KinesisEventSource(EventSource):
+
+    def __init__(self, context, config):
+        super(KinesisEventSource, self).__init__(context, config)
+        aws = kappa.aws.get_aws(context)
+        self._lambda = aws.create_client('lambda')
+
+    def _get_uuid(self, function):
+        uuid = None
+        response = self._lambda.list_event_sources(
+            FunctionName=function.name,
+            EventSourceArn=self.arn)
+        LOG.debug(response)
+        if len(response['EventSources']) > 0:
+            uuid = response['EventSources'][0]['UUID']
+        return uuid
+
+    def add(self, function):
+        try:
+            response = self._lambda.add_event_source(
+                FunctionName=function.name,
+                Role=self._context.invoke_role_arn,
+                EventSource=self.arn,
+                BatchSize=self.batch_size)
+            LOG.debug(response)
+        except Exception:
+            LOG.exception('Unable to add Kinesis event source')
+
+    def remove(self, function):
+        response = None
+        uuid = self._get_uuid(function)
+        if uuid:
+            response = self._lambda.remove_event_source(
+                UUID=uuid)
+            LOG.debug(response)
+        return response
+
+    def status(self, function):
+        LOG.debug('getting status for event source %s', self.arn)
+        try:
+            response = self._lambda.get_event_source(
+                UUID=self._get_uuid(function))
+            LOG.debug(response)
+        except ClientError:
+            LOG.debug('event source %s does not exist', self.arn)
+            response = None
+        return response
+
+
+class S3EventSource(EventSource):
+
+    def __init__(self, context, config):
+        super(S3EventSource, self).__init__(context, config)
+        aws = kappa.aws.get_aws(context)
+        self._s3 = aws.create_client('s3')
+
+    def _make_notification_id(self, function_name):
+        return 'Kappa-%s-notification' % function_name
+
+    def _get_bucket_name(self):
+        return self.arn.split(':')[-1]
+
+    def add(self, function):
+        notification_spec = {
+            'CloudFunctionConfiguration': {
+                'Id': self._make_notification_id(function.name),
+                'Events': [e for e in self._config['events']],
+                'CloudFunction': function.arn,
+                'InvocationRole': self._context.invoke_role_arn}}
+        try:
+            response = self._s3.put_bucket_notification(
+                Bucket=self._get_bucket_name(),
+                NotificationConfiguration=notification_spec)
+            LOG.debug(response)
+        except Exception:
+            LOG.exception('Unable to add S3 event source')
+
+    def remove(self, function):
+        LOG.debug('removing s3 notification')
+        response = self._s3.get_bucket_notification(
+            Bucket=self._get_bucket_name())
+        LOG.debug(response)
+        if 'CloudFunctionConfiguration' in response:
+            fn_arn = response['CloudFunctionConfiguration']['CloudFunction']
+            if fn_arn == function.arn:
+                del response['CloudFunctionConfiguration']
+                response = self._s3.put_bucket_notification(
+                    Bucket=self._get_bucket_name(),
+                    NotificationConfiguration=response)
+                LOG.debug(response)
+
+    def status(self, function):
+        LOG.debug('status for s3 notification for %s', function.name)
+        response = self._s3.get_bucket_notification(
+            Bucket=self._get_bucket_name())
+        LOG.debug(response)
+        if 'CloudFunctionConfiguration' not in response:
+            response = None
+        return response
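Which EventSource subclass gets built is decided by the service field of the configured ARN (see ``_create_event_sources`` in kappa/context.py above); the split works like this:

    # Mirrors the split(':', 3) dispatch in Context._create_event_sources.
    arn = 'arn:aws:kinesis:us-east-1:084307701560:stream/lambdastream'
    _, _, svc, _ = arn.split(':', 3)
    assert svc == 'kinesis'  # 's3' would select S3EventSource instead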
kappa/function.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import logging
+import os
+import zipfile
+
+from botocore.exceptions import ClientError
+
+import kappa.aws
+import kappa.log
+
+LOG = logging.getLogger(__name__)
+
+
+class Function(object):
+
+    def __init__(self, context, config):
+        self._context = context
+        self._config = config
+        aws = kappa.aws.get_aws(context)
+        self._lambda_svc = aws.create_client('lambda')
+        self._arn = None
+        self._log = None
+
+    @property
+    def name(self):
+        return self._config['name']
+
+    @property
+    def runtime(self):
+        return self._config['runtime']
+
+    @property
+    def handler(self):
+        return self._config['handler']
+
+    @property
+    def mode(self):
+        return self._config['mode']
+
+    @property
+    def description(self):
+        return self._config['description']
+
+    @property
+    def timeout(self):
+        return self._config['timeout']
+
+    @property
+    def memory_size(self):
+        return self._config['memory_size']
+
+    @property
+    def zipfile_name(self):
+        return self._config['zipfile_name']
+
+    @property
+    def path(self):
+        return self._config['path']
+
+    @property
+    def test_data(self):
+        return self._config['test_data']
+
+    @property
+    def arn(self):
+        if self._arn is None:
+            try:
+                response = self._lambda_svc.get_function_configuration(
+                    FunctionName=self.name)
+                LOG.debug(response)
+                self._arn = response['FunctionARN']
+            except Exception:
+                LOG.debug('Unable to find ARN for function: %s', self.name)
+        return self._arn
+
+    @property
+    def log(self):
+        if self._log is None:
+            log_group_name = '/aws/lambda/%s' % self.name
+            self._log = kappa.log.Log(self._context, log_group_name)
+        return self._log
+
+    def tail(self):
+        LOG.debug('tailing function: %s', self.name)
+        return self.log.tail()
+
+    def _zip_lambda_dir(self, zipfile_name, lambda_dir):
+        LOG.debug('_zip_lambda_dir: lambda_dir=%s', lambda_dir)
+        LOG.debug('zipfile_name=%s', zipfile_name)
+        relroot = os.path.abspath(lambda_dir)
+        with zipfile.ZipFile(zipfile_name, 'w',
+                             compression=zipfile.ZIP_DEFLATED) as zf:
+            for root, dirs, files in os.walk(lambda_dir):
+                zf.write(root, os.path.relpath(root, relroot))
+                for filename in files:
+                    filepath = os.path.join(root, filename)
+                    if os.path.isfile(filepath):
+                        arcname = os.path.join(
+                            os.path.relpath(root, relroot), filename)
+                        zf.write(filepath, arcname)
+
+    def _zip_lambda_file(self, zipfile_name, lambda_file):
+        LOG.debug('_zip_lambda_file: lambda_file=%s', lambda_file)
+        LOG.debug('zipfile_name=%s', zipfile_name)
+        with zipfile.ZipFile(zipfile_name, 'w',
+                             compression=zipfile.ZIP_DEFLATED) as zf:
+            zf.write(lambda_file)
+
+    def zip_lambda_function(self, zipfile_name, lambda_fn):
+        if os.path.isdir(lambda_fn):
+            self._zip_lambda_dir(zipfile_name, lambda_fn)
+        else:
+            self._zip_lambda_file(zipfile_name, lambda_fn)
+
+    def upload(self):
+        LOG.debug('uploading %s', self.zipfile_name)
+        self.zip_lambda_function(self.zipfile_name, self.path)
+        with open(self.zipfile_name, 'rb') as fp:
+            exec_role = self._context.exec_role_arn
+            try:
+                response = self._lambda_svc.upload_function(
+                    FunctionName=self.name,
+                    FunctionZip=fp,
+                    Runtime=self.runtime,
+                    Role=exec_role,
+                    Handler=self.handler,
+                    Mode=self.mode,
+                    Description=self.description,
+                    Timeout=self.timeout,
+                    MemorySize=self.memory_size)
+                LOG.debug(response)
+            except Exception:
+                LOG.exception('Unable to upload zip file')
+
+    def delete(self):
+        LOG.debug('deleting function %s', self.name)
+        response = self._lambda_svc.delete_function(FunctionName=self.name)
+        LOG.debug(response)
+        return response
+
+    def status(self):
+        LOG.debug('getting status for function %s', self.name)
+        try:
+            response = self._lambda_svc.get_function(
+                FunctionName=self.name)
+            LOG.debug(response)
+        except ClientError:
+            LOG.debug('function %s not found', self.name)
+            response = None
+        return response
+
+    def invoke_asynch(self, data_file):
+        LOG.debug('_invoke_async %s', data_file)
+        with open(data_file) as fp:
+            response = self._lambda_svc.invoke_async(
+                FunctionName=self.name,
+                InvokeArgs=fp)
+        LOG.debug(response)
+
+    def test(self):
+        self.invoke_asynch(self.test_data)
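One behavioral change buried in ``_zip_lambda_dir``: ``relroot`` is now the lambda directory itself rather than its parent (the old code used ``os.path.join(lambda_dir, os.pardir)``), so files land at the root of the archive instead of under a top-level folder. A sketch of the difference:

    import os

    lambda_dir = 'examplefolder'
    new_relroot = os.path.abspath(lambda_dir)
    old_relroot = os.path.abspath(os.path.join(lambda_dir, os.pardir))
    path = os.path.abspath(os.path.join(lambda_dir, 'CreateThumbnail.js'))
    # New archive name: 'CreateThumbnail.js'
    print(os.path.relpath(path, new_relroot))
    # Old archive name: 'examplefolder/CreateThumbnail.js'
    print(os.path.relpath(path, old_relroot))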
kappa/log.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import logging
+
+import kappa.aws
+
+LOG = logging.getLogger(__name__)
+
+
+class Log(object):
+
+    def __init__(self, context, log_group_name):
+        self._context = context
+        self.log_group_name = log_group_name
+        aws = kappa.aws.get_aws(self._context)
+        self._log_svc = aws.create_client('logs')
+
+    def _check_for_log_group(self):
+        LOG.debug('checking for log group')
+        response = self._log_svc.describe_log_groups()
+        log_group_names = [lg['logGroupName'] for lg in response['logGroups']]
+        return self.log_group_name in log_group_names
+
+    def streams(self):
+        LOG.debug('getting streams for log group: %s', self.log_group_name)
+        if not self._check_for_log_group():
+            LOG.info(
+                'log group %s has not been created yet', self.log_group_name)
+            return []
+        response = self._log_svc.describe_log_streams(
+            logGroupName=self.log_group_name)
+        LOG.debug(response)
+        return response['logStreams']
+
+    def tail(self):
+        LOG.debug('tailing log group: %s', self.log_group_name)
+        if not self._check_for_log_group():
+            LOG.info(
+                'log group %s has not been created yet', self.log_group_name)
+            return []
+        latest_stream = None
+        streams = self.streams()
+        for stream in streams:
+            if not latest_stream:
+                latest_stream = stream
+            elif stream['lastEventTimestamp'] > latest_stream['lastEventTimestamp']:
+                latest_stream = stream
+        response = self._log_svc.get_log_events(
+            logGroupName=self.log_group_name,
+            logStreamName=latest_stream['logStreamName'])
+        LOG.debug(response)
+        return response['events']
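``tail()`` scans for the stream with the newest ``lastEventTimestamp``; the loop is equivalent to this one-liner, shown only to make the selection explicit (``streams`` is the list returned by ``Log.streams()``, and at least one stream must exist, as the loop implicitly assumes):

    # Equivalent to the latest_stream loop in Log.tail() above.
    latest_stream = max(streams, key=lambda s: s['lastEventTimestamp'])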
kappa/stack.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import logging
+import time
+
+import kappa.aws
+
+LOG = logging.getLogger(__name__)
+
+
+class Stack(object):
+
+    completed_states = ('CREATE_COMPLETE', 'UPDATE_COMPLETE')
+    failed_states = ('ROLLBACK_COMPLETE',)
+
+    def __init__(self, context, config):
+        self._context = context
+        self._config = config
+        aws = kappa.aws.get_aws(self._context)
+        self._cfn = aws.create_client('cloudformation')
+        self._iam = aws.create_client('iam')
+
+    @property
+    def name(self):
+        return self._config['stack_name']
+
+    @property
+    def template_path(self):
+        return self._config['template']
+
+    @property
+    def exec_role(self):
+        return self._config['exec_role']
+
+    @property
+    def exec_role_arn(self):
+        return self._get_role_arn(self.exec_role)
+
+    @property
+    def invoke_role(self):
+        return self._config['invoke_role']
+
+    @property
+    def invoke_role_arn(self):
+        return self._get_role_arn(self.invoke_role)
+
+    def _get_role_arn(self, role_name):
+        role_arn = None
+        try:
+            resources = self._cfn.list_stack_resources(
+                StackName=self.name)
+            LOG.debug(resources)
+        except Exception:
+            LOG.exception('Unable to find role ARN: %s', role_name)
+        for resource in resources['StackResourceSummaries']:
+            if resource['LogicalResourceId'] == role_name:
+                role = self._iam.get_role(
+                    RoleName=resource['PhysicalResourceId'])
+                LOG.debug(role)
+                role_arn = role['Role']['Arn']
+                LOG.debug('role_arn: %s', role_arn)
+        return role_arn
+
+    def exists(self):
+        """
+        Does the CloudFormation stack already exist?
+        """
+        try:
+            response = self._cfn.describe_stacks(StackName=self.name)
+            LOG.debug('Stack %s exists', self.name)
+        except Exception:
+            LOG.debug('Stack %s does not exist', self.name)
+            response = None
+        return response
+
+    def wait(self):
+        done = False
+        while not done:
+            time.sleep(1)
+            response = self._cfn.describe_stacks(StackName=self.name)
+            LOG.debug(response)
+            status = response['Stacks'][0]['StackStatus']
+            LOG.debug('Stack status is: %s', status)
+            if status in self.completed_states:
+                done = True
+            if status in self.failed_states:
+                msg = 'Could not create stack %s: %s' % (self.name, status)
+                raise ValueError(msg)
+
+    def _create(self):
+        LOG.debug('create_stack: stack_name=%s', self.name)
+        template_body = open(self.template_path).read()
+        try:
+            response = self._cfn.create_stack(
+                StackName=self.name, TemplateBody=template_body,
+                Capabilities=['CAPABILITY_IAM'])
+            LOG.debug(response)
+        except Exception:
+            LOG.exception('Unable to create stack')
+        self.wait()
+
+    def _update(self):
+        LOG.debug('update_stack: stack_name=%s', self.name)
+        template_body = open(self.template_path).read()
+        try:
+            response = self._cfn.update_stack(
+                StackName=self.name, TemplateBody=template_body,
+                Capabilities=['CAPABILITY_IAM'])
+            LOG.debug(response)
+        except Exception as e:
+            if 'ValidationError' in str(e):
+                LOG.info('No Updates Required')
+            else:
+                LOG.exception('Unable to update stack')
+        self.wait()
+
+    def update(self):
+        if self.exists():
+            self._update()
+        else:
+            self._create()
+
+    def status(self):
+        return self.exists()
+
+    def delete(self):
+        LOG.debug('delete_stack: stack_name=%s', self.name)
+        try:
+            response = self._cfn.delete_stack(StackName=self.name)
+            LOG.debug(response)
+        except Exception:
+            LOG.exception('Unable to delete stack: %s', self.name)
samples/kinesis/README.md
0 → 100644

+Kinesis Example
+===============
+
+This is a simple Lambda example that listens for events on a Kinesis stream.
+This example is based on the one from the
+[AWS Lambda Documentation](http://docs.aws.amazon.com/lambda/latest/dg/walkthrough-kinesis-events-adminuser.html).
+The Lambda function in this example doesn't really do anything other than log
+some data, but it does show how all of the pieces go together and how to use
+``kappa`` to deploy the Lambda function.
+
+What You Need To Do
+-------------------
+
+1. Edit the ``config.yml`` file.  Specifically, you will need to edit the
+   ``profile`` and ``region`` values and the ``event_sources`` entry.
+2. Run ``kappa config.yml deploy``
+3. Run ``kappa config.yml test``
+4. Run ``kappa config.yml tail``.  You may have to run this command a few
+   times before the log events become available in CloudWatch Logs.
+5. Run ``kappa config.yml add-event-sources``
+6. Try sending data to the Kinesis stream and then tailing the logs again to
+   see if your function is getting called.
samples/kinesis/config.yml
@@ -16,6 +16,8 @@ lambda:
   memory_size: 128
   timeout: 3
   mode: event
-  event_source: arn:aws:kinesis:us-east-1:084307701560:stream/lambdastream
+  event_sources:
+    -
+      arn: arn:aws:kinesis:us-east-1:084307701560:stream/lambdastream
   test_data: input.json
 
\ No newline at end of file
samples/s3/config.yml
0 → 100644

+---
+profile: personal
+region: us-east-1
+cloudformation:
+  template: roles.cf
+  stack_name: TestS3
+  exec_role: ExecRole
+  invoke_role: InvokeRole
+lambda:
+  name: S3Sample
+  zipfile_name: S3Sample.zip
+  description: Testing S3 Lambda handler
+  path: examplefolder/
+  handler: CreateThumbnail.handler
+  runtime: nodejs
+  memory_size: 128
+  timeout: 3
+  mode: event
+  test_data: input.json
+  event_sources:
+    -
+      arn: arn:aws:s3:::test-1245812163
+      events:
+        - s3:ObjectCreated:*
samples/s3/examplefolder/CreateThumbnail.js
0 → 100644

+// dependencies
+var async = require('async');
+var AWS = require('aws-sdk');
+var gm = require('gm')
+    .subClass({ imageMagick: true }); // Enable ImageMagick integration.
+var util = require('util');
+
+// constants
+var MAX_WIDTH = 100;
+var MAX_HEIGHT = 100;
+
+// get reference to S3 client
+var s3 = new AWS.S3();
+
+exports.handler = function(event, context) {
+    // Read options from the event.
+    console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
+    var srcBucket = event.Records[0].s3.bucket.name;
+    var srcKey = event.Records[0].s3.object.key;
+    var dstBucket = srcBucket + "resized";
+    var dstKey = "resized-" + srcKey;
+
+    // Sanity check: validate that source and destination are different buckets.
+    if (srcBucket == dstBucket) {
+        console.error("Destination bucket must not match source bucket.");
+        return;
+    }
+
+    // Infer the image type.
+    var typeMatch = srcKey.match(/\.([^.]*)$/);
+    if (!typeMatch) {
+        console.error('unable to infer image type for key ' + srcKey);
+        return;
+    }
+    var imageType = typeMatch[1];
+    if (imageType != "jpg" && imageType != "png") {
+        console.log('skipping non-image ' + srcKey);
+        return;
+    }
+
+    // Download the image from S3, transform, and upload to a different S3 bucket.
+    async.waterfall([
+        function download(next) {
+            // Download the image from S3 into a buffer.
+            s3.getObject({
+                Bucket: srcBucket,
+                Key: srcKey
+            },
+            next);
+        },
+        function transform(response, next) {
+            gm(response.Body).size(function(err, size) {
+                // Infer the scaling factor to avoid stretching the image unnaturally.
+                var scalingFactor = Math.min(
+                    MAX_WIDTH / size.width,
+                    MAX_HEIGHT / size.height
+                );
+                var width = scalingFactor * size.width;
+                var height = scalingFactor * size.height;
+
+                // Transform the image buffer in memory.
+                this.resize(width, height)
+                    .toBuffer(imageType, function(err, buffer) {
+                        if (err) {
+                            next(err);
+                        } else {
+                            next(null, response.ContentType, buffer);
+                        }
+                    });
+            });
+        },
+        function upload(contentType, data, next) {
+            // Stream the transformed image to a different S3 bucket.
+            s3.putObject({
+                Bucket: dstBucket,
+                Key: dstKey,
+                Body: data,
+                ContentType: contentType
+            },
+            next);
+        }
+    ], function (err) {
+        if (err) {
+            console.error(
+                'Unable to resize ' + srcBucket + '/' + srcKey +
+                ' and upload to ' + dstBucket + '/' + dstKey +
+                ' due to an error: ' + err
+            );
+        } else {
+            console.log(
+                'Successfully resized ' + srcBucket + '/' + srcKey +
+                ' and uploaded to ' + dstBucket + '/' + dstKey
+            );
+        }
+
+        context.done();
+    });
+};
samples/s3/input.json
0 → 100644

+{
+  "Records": [
+    {
+      "eventVersion": "2.0",
+      "eventSource": "aws:s3",
+      "awsRegion": "us-east-1",
+      "eventTime": "1970-01-01T00:00:00.000Z",
+      "eventName": "ObjectCreated:Put",
+      "userIdentity": {
+        "principalId": "AIDAJDPLRKLG7UEXAMPLE"
+      },
+      "requestParameters": {
+        "sourceIPAddress": "127.0.0.1"
+      },
+      "responseElements": {
+        "x-amz-request-id": "C3D13FE58DE4C810",
+        "x-amz-id-2": "FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD"
+      },
+      "s3": {
+        "s3SchemaVersion": "1.0",
+        "configurationId": "testConfigRule",
+        "bucket": {
+          "name": "sourcebucket",
+          "ownerIdentity": {
+            "principalId": "A3NL1KOZZKExample"
+          },
+          "arn": "arn:aws:s3:::sourcebucket"
+        },
+        "object": {
+          "key": "HappyFace.jpg",
+          "size": 1024,
+          "eTag": "d41d8cd98f00b204e9800998ecf8427e",
+          "versionId": "096fKKXTRTtl3on89fVO.nfljtsv6qko"
+        }
+      }
+    }
+  ]
+}
samples/s3/roles.cf
0 → 100644

+{
+  "AWSTemplateFormatVersion": "2010-09-09",
+  "Resources": {
+    "ExecRole": {
+      "Type": "AWS::IAM::Role",
+      "Properties": {
+        "AssumeRolePolicyDocument": {
+          "Version": "2012-10-17",
+          "Statement": [ {
+            "Effect": "Allow",
+            "Principal": {
+              "Service": [ "lambda.amazonaws.com" ]
+            },
+            "Action": [ "sts:AssumeRole" ]
+          } ]
+        }
+      }
+    },
+    "ExecRolePolicies": {
+      "Type": "AWS::IAM::Policy",
+      "Properties": {
+        "PolicyName": "ExecRolePolicy",
+        "PolicyDocument": {
+          "Version": "2012-10-17",
+          "Statement": [ {
+            "Effect": "Allow",
+            "Action": [
+              "logs:*"
+            ],
+            "Resource": "arn:aws:logs:*:*:*"
+          },
+          {
+            "Effect": "Allow",
+            "Action": [
+              "s3:GetObject",
+              "s3:PutObject"
+            ],
+            "Resource": [
+              "arn:aws:s3:::*"
+            ]
+          } ]
+        },
+        "Roles": [ { "Ref": "ExecRole" } ]
+      }
+    },
+    "InvokeRole": {
+      "Type": "AWS::IAM::Role",
+      "Properties": {
+        "AssumeRolePolicyDocument": {
+          "Version": "2012-10-17",
+          "Statement": [ {
+            "Effect": "Allow",
+            "Principal": {
+              "Service": [ "s3.amazonaws.com" ]
+            },
+            "Action": [ "sts:AssumeRole" ],
+            "Condition": {
+              "ArnLike": {
+                "sts:ExternalId": "arn:aws:s3:::*"
+              }
+            }
+          } ]
+        }
+      }
+    },
+    "InvokeRolePolicies": {
+      "Type": "AWS::IAM::Policy",
+      "Properties": {
+        "PolicyName": "ExecRolePolicy",
+        "PolicyDocument": {
+          "Version": "2012-10-17",
+          "Statement": [
+            {
+              "Effect": "Allow",
+              "Resource": [
+                "*"
+              ],
+              "Action": [
+                "lambda:InvokeFunction"
+              ]
+            }
+          ]
+        },
+        "Roles": [ { "Ref": "InvokeRole" } ]
+      }
+    }
+  }
+}
setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 import os
 
 requires = [
-    'botocore==0.75.0',
+    'botocore==0.94.0',
     'click==3.3',
     'PyYAML>=3.11'
 ]
tests/__init__.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
tests/unit/__init__.py
0 → 100644

+# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
tests/unit/data/roles.cf
0 → 100644
File mode changed
tests/unit/mock_aws.py
0 → 100644
1 | +import mock | ||
2 | + | ||
3 | +import tests.unit.responses as responses | ||
4 | + | ||
5 | + | ||
6 | +class MockAWS(object): | ||
7 | + | ||
8 | + def __init__(self, profile=None, region=None): | ||
9 | + pass | ||
10 | + | ||
11 | + def create_client(self, client_name): | ||
12 | + client = None | ||
13 | + if client_name == 'logs': | ||
14 | + client = mock.Mock() | ||
15 | + choices = responses.logs_describe_log_groups | ||
16 | + client.describe_log_groups = mock.Mock( | ||
17 | + side_effect=choices) | ||
18 | + choices = responses.logs_describe_log_streams | ||
19 | + client.describe_log_streams = mock.Mock( | ||
20 | + side_effect=choices) | ||
21 | + choices = responses.logs_get_log_events | ||
22 | + client.get_log_events = mock.Mock( | ||
23 | + side_effect=choices) | ||
24 | + if client_name == 'cloudformation': | ||
25 | + client = mock.Mock() | ||
26 | + choices = responses.cfn_list_stack_resources | ||
27 | + client.list_stack_resources = mock.Mock( | ||
28 | + side_effect=choices) | ||
29 | + choices = responses.cfn_describe_stacks | ||
30 | + client.describe_stacks = mock.Mock( | ||
31 | + side_effect=choices) | ||
32 | + choices = responses.cfn_create_stack | ||
33 | + client.create_stack = mock.Mock( | ||
34 | + side_effect=choices) | ||
35 | + choices = responses.cfn_delete_stack | ||
36 | + client.delete_stack = mock.Mock( | ||
37 | + side_effect=choices) | ||
38 | + if client_name == 'iam': | ||
39 | + client = mock.Mock() | ||
40 | + choices = responses.iam_get_role | ||
41 | + client.get_role = mock.Mock( | ||
42 | + side_effect=choices) | ||
43 | + return client | ||
44 | + | ||
45 | + | ||
46 | +def get_aws(context): | ||
47 | + return MockAWS() |
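MockAWS relies on mock's `side_effect` with a list: each call to a patched client method returns the next canned response, and a call beyond the end of the list raises StopIteration. A minimal sketch of that behavior (the dict payloads here are placeholders, not real API responses):

    import mock

    # side_effect with an iterable returns successive items, one per call.
    canned = [{'call': 1}, {'call': 2}]
    describe = mock.Mock(side_effect=canned)

    assert describe() == {'call': 1}
    assert describe() == {'call': 2}
    # A third call raises StopIteration, so each fixture list in
    # responses.py must hold one entry per API call the test will make.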
tests/unit/responses.py
0 → 100644
1 | +import datetime | ||
2 | +from dateutil.tz import tzutc | ||
3 | + | ||
4 | +cfn_list_stack_resources = [{'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'dd35f0ef-9699-11e4-ba38-c355c9515dbc'}, u'StackResourceSummaries': [{u'ResourceStatus': 'CREATE_COMPLETE', u'ResourceType': 'AWS::IAM::Role', u'ResourceStatusReason': None, u'LastUpdatedTimestamp': datetime.datetime(2015, 1, 6, 17, 37, 54, 861000, tzinfo=tzutc()), u'PhysicalResourceId': 'TestKinesis-InvokeRole-IF6VUXY9MBJN', u'LogicalResourceId': 'InvokeRole'}, {u'ResourceStatus': 'CREATE_COMPLETE', u'ResourceType': 'AWS::IAM::Role', u'ResourceStatusReason': None, u'LastUpdatedTimestamp': datetime.datetime(2015, 1, 6, 17, 37, 55, 18000, tzinfo=tzutc()), u'PhysicalResourceId': 'TestKinesis-ExecRole-567SAV6TZOET', u'LogicalResourceId': 'ExecRole'}, {u'ResourceStatus': 'CREATE_COMPLETE', u'ResourceType': 'AWS::IAM::Policy', u'ResourceStatusReason': None, u'LastUpdatedTimestamp': datetime.datetime(2015, 1, 6, 17, 37, 58, 120000, tzinfo=tzutc()), u'PhysicalResourceId': 'TestK-Invo-OMW5SDLQM8FM', u'LogicalResourceId': 'InvokeRolePolicies'}, {u'ResourceStatus': 'CREATE_COMPLETE', u'ResourceType': 'AWS::IAM::Policy', u'ResourceStatusReason': None, u'LastUpdatedTimestamp': datetime.datetime(2015, 1, 6, 17, 37, 58, 454000, tzinfo=tzutc()), u'PhysicalResourceId': 'TestK-Exec-APWRVKTBPPPT', u'LogicalResourceId': 'ExecRolePolicies'}]}] | ||
5 | + | ||
6 | +iam_get_role = [{u'Role': {u'AssumeRolePolicyDocument': {u'Version': u'2012-10-17', u'Statement': [{u'Action': u'sts:AssumeRole', u'Principal': {u'Service': u's3.amazonaws.com'}, u'Effect': u'Allow', u'Condition': {u'ArnLike': {u'sts:ExternalId': u'arn:aws:s3:::*'}}, u'Sid': u''}, {u'Action': u'sts:AssumeRole', u'Principal': {u'Service': u'lambda.amazonaws.com'}, u'Effect': u'Allow', u'Sid': u''}]}, u'RoleId': 'AROAIEVJHUJG2I4MG5PSC', u'CreateDate': datetime.datetime(2015, 1, 6, 17, 37, 44, tzinfo=tzutc()), u'RoleName': 'TestKinesis-InvokeRole-IF6VUXY9MBJN', u'Path': '/', u'Arn': 'arn:aws:iam::0123456789012:role/TestKinesis-InvokeRole-FOO'}, 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'dd6e8d42-9699-11e4-afe6-d3625e8b365b'}}] | ||
7 | + | ||
8 | +logs_describe_log_groups = [{'ResponseMetadata': {'HTTPStatusCode': 200, | ||
9 | + 'RequestId': 'da962431-afed-11e4-8c17-1776597471e6'}, | ||
10 | + u'logGroups': [{u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample*', | ||
11 | + u'creationTime': 1423175925414, | ||
12 | + u'logGroupName': u'foo/bar', | ||
13 | + u'metricFilterCount': 1, | ||
14 | + u'storedBytes': 0}]}, | ||
15 | +{'ResponseMetadata': {'HTTPStatusCode': 200, | ||
16 | + 'RequestId': 'da962431-afed-11e4-8c17-1776597471e6'}, | ||
17 | + u'logGroups': [{u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample*', | ||
18 | + u'creationTime': 1423175925414, | ||
19 | + u'logGroupName': u'foo/bar', | ||
20 | + u'metricFilterCount': 1, | ||
21 | + u'storedBytes': 0}]}] | ||
22 | + | ||
23 | +logs_describe_log_streams = [{u'logStreams': [{u'firstEventTimestamp': 1417042749449, u'lastEventTimestamp': 1417042749547, u'creationTime': 1417042748263, u'uploadSequenceToken': u'49540114640150833041490484409222729829873988799393975922', u'logStreamName': u'1cc48e4e613246b7974094323259d600', u'lastIngestionTime': 1417042750483, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:1cc48e4e613246b7974094323259d600', u'storedBytes': 712}, {u'firstEventTimestamp': 1417272406988, u'lastEventTimestamp': 1417272407088, u'creationTime': 1417272405690, u'uploadSequenceToken': u'49540113907504451034164105858363493278561872472363261986', u'logStreamName': u'2782a5ff88824c85a9639480d1ed7bbe', u'lastIngestionTime': 1417272408043, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:2782a5ff88824c85a9639480d1ed7bbe', u'storedBytes': 712}, {u'firstEventTimestamp': 1420569035842, u'lastEventTimestamp': 1420569035941, u'creationTime': 1420569034614, u'uploadSequenceToken': u'49540113907883563702539166025438885323514410026454245426', u'logStreamName': u'2d62991a479b4ebf9486176122b72a55', u'lastIngestionTime': 1420569036909, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:2d62991a479b4ebf9486176122b72a55', u'storedBytes': 709}, {u'firstEventTimestamp': 1418244027421, u'lastEventTimestamp': 1418244027541, u'creationTime': 1418244026907, u'uploadSequenceToken': u'49540113964795065449189116778452984186276757901477438642', u'logStreamName': u'4f44ffa128d6405591ca83b2b0f9dd2d', u'lastIngestionTime': 1418244028484, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:4f44ffa128d6405591ca83b2b0f9dd2d', u'storedBytes': 1010}, {u'firstEventTimestamp': 1418242565524, u'lastEventTimestamp': 1418242565641, u'creationTime': 1418242564196, u'uploadSequenceToken': u'49540113095132904942090446312687285178819573422397343074', u'logStreamName': u'69c5ac87e7e6415985116e8cb44e538e', u'lastIngestionTime': 1418242566558, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:69c5ac87e7e6415985116e8cb44e538e', u'storedBytes': 713}, {u'firstEventTimestamp': 1417213193378, u'lastEventTimestamp': 1417213193478, u'creationTime': 1417213192095, u'uploadSequenceToken': u'49540113336360065754596187770479764234792559857643841394', u'logStreamName': u'f68e3d87b8a14cdba338f6926f7cf50a', u'lastIngestionTime': 1417213194421, u'arn': u'arn:aws:logs:us-east-1:0123456789012:log-group:/aws/lambda/KinesisSample:log-stream:f68e3d87b8a14cdba338f6926f7cf50a', u'storedBytes': 711}], 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '2a6d4941-969b-11e4-947f-19d1c72ede7e'}}] | ||
24 | + | ||
25 | +logs_get_log_events = [{'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '2a7deb71-969b-11e4-914b-8f1f3d7b023d'}, u'nextForwardToken': u'f/31679748107442531967654742688057700554200447759088287749', u'events': [{u'ingestionTime': 1420569036909, u'timestamp': 1420569035842, u'message': u'2015-01-06T18:30:35.841Z\tko2sss03iq7l2pdk\tLoading event\n'}, {u'ingestionTime': 1420569036909, u'timestamp': 1420569035899, u'message': u'START RequestId: 23007242-95d2-11e4-a10e-7b2ab60a7770\n'}, {u'ingestionTime': 1420569036909, u'timestamp': 1420569035940, u'message': u'2015-01-06T18:30:35.940Z\t23007242-95d2-11e4-a10e-7b2ab60a7770\t{\n "Records": [\n {\n "kinesis": {\n "partitionKey": "partitionKey-3",\n "kinesisSchemaVersion": "1.0",\n "data": "SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=",\n "sequenceNumber": "49545115243490985018280067714973144582180062593244200961"\n },\n "eventSource": "aws:kinesis",\n "eventID": "shardId-000000000000:49545115243490985018280067714973144582180062593244200961",\n "invokeIdentityArn": "arn:aws:iam::0123456789012:role/testLEBRole",\n "eventVersion": "1.0",\n "eventName": "aws:kinesis:record",\n "eventSourceARN": "arn:aws:kinesis:us-east-1:35667example:stream/examplestream",\n "awsRegion": "us-east-1"\n }\n ]\n}\n'}, {u'ingestionTime': 1420569036909, u'timestamp': 1420569035940, u'message': u'2015-01-06T18:30:35.940Z\t23007242-95d2-11e4-a10e-7b2ab60a7770\tDecoded payload: Hello, this is a test 123.\n'}, {u'ingestionTime': 1420569036909, u'timestamp': 1420569035941, u'message': u'END RequestId: 23007242-95d2-11e4-a10e-7b2ab60a7770\n'}, {u'ingestionTime': 1420569036909, u'timestamp': 1420569035941, u'message': u'REPORT RequestId: 23007242-95d2-11e4-a10e-7b2ab60a7770\tDuration: 98.51 ms\tBilled Duration: 100 ms \tMemory Size: 128 MB\tMax Memory Used: 26 MB\t\n'}], u'nextBackwardToken': u'b/31679748105234758193000210997045664445208259969996226560'}] | ||
26 | + | ||
27 | +cfn_describe_stacks = [ | ||
28 | + {u'Stacks': [{u'StackId': 'arn:aws:cloudformation:us-east-1:084307701560:stack/TestKinesis/7c4ae730-96b8-11e4-94cc-5001dc3ed8d2', u'Description': None, u'Tags': [], u'StackStatusReason': 'User Initiated', u'CreationTime': datetime.datetime(2015, 1, 7, 21, 59, 43, 208000, tzinfo=tzutc()), u'Capabilities': ['CAPABILITY_IAM'], u'StackName': 'TestKinesis', u'NotificationARNs': [], u'StackStatus': 'CREATE_IN_PROGRESS', u'DisableRollback': False}], 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '7d66debd-96b8-11e4-a647-4f4741ffff69'}}, | ||
29 | + {u'Stacks': [{u'StackId': 'arn:aws:cloudformation:us-east-1:084307701560:stack/TestKinesis/7c4ae730-96b8-11e4-94cc-5001dc3ed8d2', u'Description': None, u'Tags': [], u'StackStatusReason': 'User Initiated', u'CreationTime': datetime.datetime(2015, 1, 7, 21, 59, 43, 208000, tzinfo=tzutc()), u'Capabilities': ['CAPABILITY_IAM'], u'StackName': 'TestKinesis', u'NotificationARNs': [], u'StackStatus': 'CREATE_IN_PROGRESS', u'DisableRollback': False}], 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '7e36fff7-96b8-11e4-af44-6350f4f8c2ae'}}, | ||
30 | + {u'Stacks': [{u'StackId': 'arn:aws:cloudformation:us-east-1:084307701560:stack/TestKinesis/7c4ae730-96b8-11e4-94cc-5001dc3ed8d2', u'Description': None, u'Tags': [], u'StackStatusReason': 'User Initiated', u'CreationTime': datetime.datetime(2015, 1, 7, 21, 59, 43, 208000, tzinfo=tzutc()), u'Capabilities': ['CAPABILITY_IAM'], u'StackName': 'TestKinesis', u'NotificationARNs': [], u'StackStatus': 'CREATE_IN_PROGRESS', u'DisableRollback': False}], 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '7ef03e10-96b8-11e4-bc86-7f67e11abcfa'}}, | ||
31 | + {u'Stacks': [{u'StackId': 'arn:aws:cloudformation:us-east-1:084307701560:stack/TestKinesis/7c4ae730-96b8-11e4-94cc-5001dc3ed8d2', u'Description': None, u'Tags': [], u'StackStatusReason': None, u'CreationTime': datetime.datetime(2015, 1, 7, 21, 59, 43, 208000, tzinfo=tzutc()), u'Capabilities': ['CAPABILITY_IAM'], u'StackName': 'TestKinesis', u'NotificationARNs': [], u'StackStatus': 'CREATE_COMPLETE', u'DisableRollback': False}], 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '8c2bff8e-96b8-11e4-be70-c5ad82c32f2d'}}] | ||
32 | + | ||
33 | +cfn_create_stack = [{u'StackId': 'arn:aws:cloudformation:us-east-1:084307701560:stack/TestKinesis/7c4ae730-96b8-11e4-94cc-5001dc3ed8d2', 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '7c2f2260-96b8-11e4-be70-c5ad82c32f2d'}}] | ||
34 | + | ||
35 | +cfn_delete_stack = [{'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'f19af5b8-96bc-11e4-860e-11ba752b58a9'}}] |
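Note that `cfn_describe_stacks` deliberately yields three CREATE_IN_PROGRESS responses before CREATE_COMPLETE, so code that polls `describe_stacks` until a stack settles can be exercised without touching real infrastructure. A sketch of the kind of polling loop these fixtures are shaped for (`wait_for_stack` is a hypothetical helper for illustration, not part of kappa):

    import time

    def wait_for_stack(client, stack_name, poll_seconds=1):
        # Poll until the stack leaves CREATE_IN_PROGRESS; with the mocked
        # client above, the fourth call reports CREATE_COMPLETE.
        while True:
            response = client.describe_stacks(StackName=stack_name)
            status = response['Stacks'][0]['StackStatus']
            if status != 'CREATE_IN_PROGRESS':
                return status
            time.sleep(poll_seconds)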
tests/unit/test_log.py
0 → 100644
1 | +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/ | ||
2 | +# | ||
3 | +# Licensed under the Apache License, Version 2.0 (the "License"). You | ||
4 | +# may not use this file except in compliance with the License. A copy of | ||
5 | +# the License is located at | ||
6 | +# | ||
7 | +# http://aws.amazon.com/apache2.0/ | ||
8 | +# | ||
9 | +# or in the "license" file accompanying this file. This file is | ||
10 | +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF | ||
11 | +# ANY KIND, either express or implied. See the License for the specific | ||
12 | +# language governing permissions and limitations under the License. | ||
13 | + | ||
14 | +import unittest | ||
15 | + | ||
16 | +import mock | ||
17 | + | ||
18 | +from kappa.log import Log | ||
19 | +from tests.unit.mock_aws import get_aws | ||
20 | + | ||
21 | + | ||
22 | +class TestLog(unittest.TestCase): | ||
23 | + | ||
24 | + def setUp(self): | ||
25 | + self.aws_patch = mock.patch('kappa.aws.get_aws', get_aws) | ||
26 | + self.mock_aws = self.aws_patch.start() | ||
27 | + | ||
28 | + def tearDown(self): | ||
29 | + self.aws_patch.stop() | ||
30 | + | ||
31 | + def test_streams(self): | ||
32 | + mock_context = mock.Mock() | ||
33 | + log = Log(mock_context, 'foo/bar') | ||
34 | + streams = log.streams() | ||
35 | + self.assertEqual(len(streams), 6) | ||
36 | + | ||
37 | + def test_tail(self): | ||
38 | + mock_context = mock.Mock() | ||
39 | + log = Log(mock_context, 'foo/bar') | ||
40 | + events = log.tail() | ||
41 | + self.assertEqual(len(events), 6) | ||
42 | + self.assertEqual(events[0]['ingestionTime'], 1420569036909) | ||
43 | + self.assertIn('RequestId: 23007242-95d2-11e4-a10e-7b2ab60a7770', | ||
44 | + events[-1]['message']) |
tests/unit/test_stack.py
0 → 100644
1 | +# Copyright (c) 2015 Mitch Garnaat http://garnaat.org/ | ||
2 | +# | ||
3 | +# Licensed under the Apache License, Version 2.0 (the "License"). You | ||
4 | +# may not use this file except in compliance with the License. A copy of | ||
5 | +# the License is located at | ||
6 | +# | ||
7 | +# http://aws.amazon.com/apache2.0/ | ||
8 | +# | ||
9 | +# or in the "license" file accompanying this file. This file is | ||
10 | +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF | ||
11 | +# ANY KIND, either express or implied. See the License for the specific | ||
12 | +# language governing permissions and limitations under the License. | ||
13 | + | ||
14 | +import unittest | ||
15 | +import os | ||
16 | + | ||
17 | +import mock | ||
18 | + | ||
19 | +from kappa.stack import Stack | ||
20 | +from tests.unit.mock_aws import get_aws | ||
21 | + | ||
22 | +Config = { | ||
23 | + 'template': 'roles.cf', | ||
24 | + 'stack_name': 'FooBar', | ||
25 | + 'exec_role': 'ExecRole', | ||
26 | + 'invoke_role': 'InvokeRole'} | ||
27 | + | ||
28 | + | ||
29 | +def path(filename): | ||
30 | + return os.path.join(os.path.dirname(__file__), 'data', filename) | ||
31 | + | ||
32 | + | ||
33 | +class TestStack(unittest.TestCase): | ||
34 | + | ||
35 | + def setUp(self): | ||
36 | + self.aws_patch = mock.patch('kappa.aws.get_aws', get_aws) | ||
37 | + self.mock_aws = self.aws_patch.start() | ||
38 | + Config['template'] = path(Config['template']) | ||
39 | + | ||
40 | + def tearDown(self): | ||
41 | + self.aws_patch.stop() | ||
42 | + | ||
43 | + def test_properties(self): | ||
44 | + mock_context = mock.Mock() | ||
45 | + stack = Stack(mock_context, Config) | ||
46 | + self.assertEqual(stack.name, Config['stack_name']) | ||
47 | + self.assertEqual(stack.template_path, Config['template']) | ||
48 | + self.assertEqual(stack.exec_role, Config['exec_role']) | ||
49 | + self.assertEqual(stack.invoke_role, Config['invoke_role']) | ||
50 | + self.assertEqual( | ||
51 | + stack.invoke_role_arn, | ||
52 | + 'arn:aws:iam::0123456789012:role/TestKinesis-InvokeRole-FOO') | ||
53 | + | ||
54 | + def test_exists(self): | ||
55 | + mock_context = mock.Mock() | ||
56 | + stack = Stack(mock_context, Config) | ||
57 | + self.assertTrue(stack.exists()) | ||
58 | + | ||
59 | + def test_update(self): | ||
60 | + mock_context = mock.Mock() | ||
61 | + stack = Stack(mock_context, Config) | ||
62 | + stack.update() | ||
63 | + | ||
64 | + def test_delete(self): | ||
65 | + mock_context = mock.Mock() | ||
66 | + stack = Stack(mock_context, Config) | ||
67 | + stack.delete() |
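The new unit tests import `mock` and `dateutil`, neither of which appears in setup.py's `requires` list, so they look like test-only dependencies. Assuming no project-specific runner beyond what this commit shows, something like the following should run the suite offline:

    pip install mock python-dateutil
    python -m unittest discover tests/unit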