Mitch Garnaat

Work-in-progress commit: a significant refactoring of the codebase.

...@@ -14,33 +14,8 @@ ...@@ -14,33 +14,8 @@
14 import logging 14 import logging
15 15
16 import click 16 import click
17 -import yaml
18 17
19 -from kappa import Kappa 18 +from kappa.context import Context
20 -
21 -FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
22 -
23 -
24 -def set_debug_logger(logger_names=['kappa'], stream=None):
25 - """
26 - Convenience function to quickly configure full debug output
27 - to go to the console.
28 - """
29 - for logger_name in logger_names:
30 - log = logging.getLogger(logger_name)
31 - log.setLevel(logging.DEBUG)
32 -
33 - ch = logging.StreamHandler(stream)
34 - ch.setLevel(logging.DEBUG)
35 -
36 - # create formatter
37 - formatter = logging.Formatter(FmtString)
38 -
39 - # add formatter to ch
40 - ch.setFormatter(formatter)
41 -
42 - # add ch to logger
43 - log.addHandler(ch)
44 19
45 20
46 @click.command() 21 @click.command()
...@@ -62,27 +37,26 @@ def set_debug_logger(logger_names=['kappa'], stream=None): ...@@ -62,27 +37,26 @@ def set_debug_logger(logger_names=['kappa'], stream=None):
62 type=click.Choice(['deploy', 'test', 'tail', 'add-event-source', 'delete']) 37 type=click.Choice(['deploy', 'test', 'tail', 'add-event-source', 'delete'])
63 ) 38 )
def main(config=None, debug=False, command=None):
    """Entry point for the kappa command line tool.

    config -- open file handle for the YAML configuration (click option).
    debug -- when true, Context enables debug-level logging.
    command -- one of deploy / test / tail / add-event-source / delete.
    """
    ctx = Context(config, debug)
    if command == 'deploy':
        click.echo('Deploying ...')
        ctx.deploy()
        click.echo('...done')
    elif command == 'test':
        click.echo('Sending test data ...')
        ctx.test()
        click.echo('...done')
    elif command == 'tail':
        # tail() returns raw CloudWatch log events; show just the text.
        events = ctx.tail()
        for event in events:
            print(event['message'])
    elif command == 'delete':
        click.echo('Deleting ...')
        ctx.delete()
        click.echo('...done')
    elif command == 'add-event-source':
        click.echo('Adding event source ...')
        # Fixed: Context defines ``add_event_sources`` (plural); the
        # singular spelling raised AttributeError at runtime.
        ctx.add_event_sources()
        click.echo('...done')
87 61
88 62
......
...@@ -11,233 +11,6 @@ ...@@ -11,233 +11,6 @@
11 # ANY KIND, either express or implied. See the License for the specific 11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License. 12 # language governing permissions and limitations under the License.
13 13
14 -import logging
15 import os 14 import os
16 -import zipfile
17 -import time
18 15
# Read the package version from the adjacent ``_version`` file, closing
# the handle explicitly (``open(...).read()`` previously leaked it).
with open(os.path.join(os.path.dirname(__file__), '_version')) as _version_fp:
    __version__ = _version_fp.read()
del _version_fp
20 -from botocore.exceptions import ClientError
21 -
22 -LOG = logging.getLogger(__name__)
23 -
24 -
25 -class Kappa(object):
26 -
27 - completed_states = ('CREATE_COMPLETE', 'UPDATE_COMPLETE')
28 -
29 - def __init__(self, config):
30 - self.config = config
31 - self.session = botocore.session.get_session()
32 - self.session.profile = config['profile']
33 - self.region = config['region']
34 -
35 - def create_update_roles(self, stack_name, roles_path):
36 - LOG.debug('create_update_policies: stack_name=%s', stack_name)
37 - LOG.debug('create_update_policies: roles_path=%s', roles_path)
38 - cfn = self.session.create_client('cloudformation', self.region)
39 - # Does stack already exist?
40 - try:
41 - response = cfn.describe_stacks(StackName=stack_name)
42 - LOG.debug('Stack %s already exists', stack_name)
43 - except ClientError:
44 - LOG.debug('Stack %s does not exist', stack_name)
45 - response = None
46 - template_body = open(roles_path).read()
47 - if response:
48 - try:
49 - cfn.update_stack(
50 - StackName=stack_name, TemplateBody=template_body,
51 - Capabilities=['CAPABILITY_IAM'])
52 - except ClientError, e:
53 - LOG.debug(str(e))
54 - else:
55 - response = cfn.create_stack(
56 - StackName=stack_name, TemplateBody=template_body,
57 - Capabilities=['CAPABILITY_IAM'])
58 - done = False
59 - while not done:
60 - time.sleep(1)
61 - response = cfn.describe_stacks(StackName=stack_name)
62 - status = response['Stacks'][0]['StackStatus']
63 - LOG.debug('Stack status is: %s', status)
64 - if status in self.completed_states:
65 - done = True
66 -
67 - def get_role_arn(self, role_name):
68 - role_arn = None
69 - cfn = self.session.create_client('cloudformation', self.region)
70 - try:
71 - resources = cfn.list_stack_resources(
72 - StackName=self.config['cloudformation']['stack_name'])
73 - except Exception:
74 - LOG.exception('Unable to find role ARN: %s', role_name)
75 - for resource in resources['StackResourceSummaries']:
76 - if resource['LogicalResourceId'] == role_name:
77 - iam = self.session.create_client('iam')
78 - role = iam.get_role(RoleName=resource['PhysicalResourceId'])
79 - role_arn = role['Role']['Arn']
80 - LOG.debug('role_arn: %s', role_arn)
81 - return role_arn
82 -
83 - def delete_roles(self, stack_name):
84 - LOG.debug('delete_roles: stack_name=%s', stack_name)
85 - cfn = self.session.create_client('cloudformation', self.region)
86 - try:
87 - cfn.delete_stack(StackName=stack_name)
88 - except Exception:
89 - LOG.exception('Unable to delete stack: %s', stack_name)
90 -
91 - def _zip_lambda_dir(self, zipfile_name, lambda_dir):
92 - LOG.debug('_zip_lambda_dir: lambda_dir=%s', lambda_dir)
93 - LOG.debug('zipfile_name=%s', zipfile_name)
94 - relroot = os.path.abspath(lambda_dir)
95 - with zipfile.ZipFile(zipfile_name, 'w') as zf:
96 - for root, dirs, files in os.walk(lambda_dir):
97 - zf.write(root, os.path.relpath(root, relroot))
98 - for file in files:
99 - filename = os.path.join(root, file)
100 - if os.path.isfile(filename):
101 - arcname = os.path.join(
102 - os.path.relpath(root, relroot), file)
103 - zf.write(filename, arcname)
104 -
105 - def _zip_lambda_file(self, zipfile_name, lambda_file):
106 - LOG.debug('_zip_lambda_file: lambda_file=%s', lambda_file)
107 - LOG.debug('zipfile_name=%s', zipfile_name)
108 - with zipfile.ZipFile(zipfile_name, 'w') as zf:
109 - zf.write(lambda_file)
110 -
111 - def zip_lambda_function(self, zipfile_name, lambda_fn):
112 - if os.path.isdir(lambda_fn):
113 - self._zip_lambda_dir(zipfile_name, lambda_fn)
114 - else:
115 - self._zip_lambda_file(zipfile_name, lambda_fn)
116 -
117 - def upload_lambda_function(self, zip_file):
118 - LOG.debug('uploading %s', zip_file)
119 - lambda_svc = self.session.create_client('lambda', self.region)
120 - with open(zip_file, 'rb') as fp:
121 - exec_role = self.get_role_arn(
122 - self.config['cloudformation']['exec_role'])
123 - try:
124 - response = lambda_svc.upload_function(
125 - FunctionName=self.config['lambda']['name'],
126 - FunctionZip=fp,
127 - Runtime=self.config['lambda']['runtime'],
128 - Role=exec_role,
129 - Handler=self.config['lambda']['handler'],
130 - Mode=self.config['lambda']['mode'],
131 - Description=self.config['lambda']['description'],
132 - Timeout=self.config['lambda']['timeout'],
133 - MemorySize=self.config['lambda']['memory_size'])
134 - LOG.debug(response)
135 - except Exception:
136 - LOG.exception('Unable to upload zip file')
137 -
138 - def delete_lambda_function(self, function_name):
139 - LOG.debug('deleting function %s', function_name)
140 - lambda_svc = self.session.create_client('lambda', self.region)
141 - response = lambda_svc.delete_function(FunctionName=function_name)
142 - LOG.debug(response)
143 - return response
144 -
145 - def _invoke_asynch(self, data_file):
146 - LOG.debug('_invoke_async %s', data_file)
147 - with open(data_file) as fp:
148 - lambda_svc = self.session.create_client('lambda', self.region)
149 - response = lambda_svc.invoke_async(
150 - FunctionName=self.config['lambda']['name'],
151 - InvokeArgs=fp)
152 - LOG.debug(response)
153 -
154 - def _tail(self, function_name):
155 - LOG.debug('tailing function: %s', function_name)
156 - log_svc = self.session.create_client('logs', self.region)
157 - # kinda kludgy but can't find any way to get log group name
158 - log_group_name = '/aws/lambda/%s' % function_name
159 - latest_stream = None
160 - response = log_svc.describe_log_streams(logGroupName=log_group_name)
161 - # The streams are not ordered by time, hence this ugliness
162 - for stream in response['logStreams']:
163 - if not latest_stream:
164 - latest_stream = stream
165 - elif stream['lastEventTimestamp'] > latest_stream['lastEventTimestamp']:
166 - latest_stream = stream
167 - response = log_svc.get_log_events(
168 - logGroupName=log_group_name,
169 - logStreamName=latest_stream['logStreamName'])
170 - for log_event in response['events']:
171 - print(log_event['message'])
172 -
173 - def _get_function_arn(self):
174 - name = self.config['lambda']['name']
175 - arn = None
176 - lambda_svc = self.session.create_client('lambda', self.region)
177 - try:
178 - response = lambda_svc.get_function_configuration(
179 - FunctionName=name)
180 - LOG.debug(response)
181 - arn = response['FunctionARN']
182 - except Exception:
183 - LOG.debug('Unable to find ARN for function: %s' % name)
184 - return arn
185 -
186 - def _add_kinesis_event_source(self, event_source_arn):
187 - lambda_svc = self.session.create_client('lambda', self.region)
188 - try:
189 - invoke_role = self.get_role_arn(
190 - self.config['cloudformation']['invoke_role'])
191 - response = lambda_svc.add_event_source(
192 - FunctionName=self.config['lambda']['name'],
193 - Role=invoke_role,
194 - EventSource=event_source_arn,
195 - BatchSize=self.config['lambda'].get('batch_size', 100))
196 - LOG.debug(response)
197 - except Exception:
198 - LOG.exception('Unable to add event source')
199 -
200 - def _add_s3_event_source(self, event_source_arn):
201 - s3_svc = self.session.create_client('s3', self.region)
202 - bucket_name = event_source_arn.split(':')[-1]
203 - invoke_role = self.get_role_arn(
204 - self.config['cloudformation']['invoke_role'])
205 - notification_spec = {
206 - 'CloudFunctionConfiguration': {
207 - 'Id': 'Kappa-%s-notification' % self.config['lambda']['name'],
208 - 'Events': [e for e in self.config['lambda']['s3_events']],
209 - 'CloudFunction': self._get_function_arn(),
210 - 'InvocationRole': invoke_role}}
211 - response = s3_svc.put_bucket_notification(
212 - Bucket=bucket_name,
213 - NotificationConfiguration=notification_spec)
214 - LOG.debug(response)
215 -
216 - def add_event_source(self):
217 - event_source_arn = self.config['lambda']['event_source']
218 - _, _, svc, _ = event_source_arn.split(':', 3)
219 - if svc == 'kinesis':
220 - self._add_kinesis_event_source(event_source_arn)
221 - elif svc == 's3':
222 - self._add_s3_event_source(event_source_arn)
223 - else:
224 - raise ValueError('Unsupported event source: %s' % event_source_arn)
225 -
226 - def deploy(self):
227 - self.create_update_roles(
228 - self.config['cloudformation']['stack_name'],
229 - self.config['cloudformation']['template'])
230 - self.zip_lambda_function(
231 - self.config['lambda']['zipfile_name'],
232 - self.config['lambda']['path'])
233 - self.upload_lambda_function(self.config['lambda']['zipfile_name'])
234 -
235 - def test(self):
236 - self._invoke_asynch(self.config['lambda']['test_data'])
237 -
238 - def tail(self):
239 - self._tail(self.config['lambda']['name'])
240 -
241 - def delete(self):
242 - self.delete_roles(self.config['cloudformation']['stack_name'])
243 - self.delete_lambda_function(self.config['lambda']['name'])
......
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import botocore.session
15 +
16 +
class __AWS(object):
    """Lazy, caching factory for botocore service clients bound to a
    single profile/region pair."""

    def __init__(self, profile=None, region=None):
        self._client_cache = {}
        self._session = botocore.session.get_session()
        self._session.profile = profile
        self._region = region

    def create_client(self, client_name):
        """Return the client for ``client_name``, building it at most once."""
        client = self._client_cache.get(client_name)
        if client is None:
            client = self._session.create_client(client_name, self._region)
            self._client_cache[client_name] = client
        return client
30 +
31 +
__Singleton_AWS = None


def get_aws(context):
    """Return the process-wide AWS helper, creating it on first use.

    The first caller's profile/region win; later contexts reuse the
    same singleton.
    """
    global __Singleton_AWS
    if __Singleton_AWS is None:
        __Singleton_AWS = __AWS(profile=context.profile,
                                region=context.region)
    return __Singleton_AWS
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import logging
15 +import yaml
16 +
17 +import kappa.function
18 +import kappa.event_source
19 +import kappa.stack
20 +
21 +LOG = logging.getLogger(__name__)
22 +
23 +DebugFmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
24 +InfoFmtString = '\t%(message)s'
25 +
26 +
class Context(object):
    """Top-level coordinator for a kappa deployment.

    Parses the YAML config file, configures logging, and wires together
    the CloudFormation stack, the Lambda function, and any configured
    event sources.
    """

    def __init__(self, config_file, debug=False):
        if debug:
            self.set_logger('kappa', logging.DEBUG)
        else:
            self.set_logger('kappa', logging.INFO)
        # NOTE(review): yaml.load is unsafe on untrusted input; the config
        # file is assumed to be authored by the operator.
        self.config = yaml.load(config_file)
        self._stack = kappa.stack.Stack(
            self, self.config['cloudformation'])
        self.function = kappa.function.Function(
            self, self.config['lambda'])
        self.event_sources = []
        self._create_event_sources()

    @property
    def profile(self):
        # AWS credential profile name, or None for the default chain.
        return self.config.get('profile', None)

    @property
    def region(self):
        return self.config.get('region', None)

    @property
    def cfn_config(self):
        return self.config.get('cloudformation', None)

    @property
    def lambda_config(self):
        return self.config.get('lambda', None)

    @property
    def exec_role_arn(self):
        # Fixed: this previously returned the *invoke* role ARN, so the
        # Lambda function was uploaded with the wrong execution role.
        return self._stack.exec_role_arn

    @property
    def invoke_role_arn(self):
        return self._stack.invoke_role_arn

    def debug(self):
        """Switch the kappa logger to full debug output."""
        self.set_logger('kappa', logging.DEBUG)

    def set_logger(self, logger_name, level=logging.INFO):
        """
        Convenience function to quickly configure full debug output
        to go to the console.

        NOTE(review): each call adds a new StreamHandler, so calling
        this twice (e.g. __init__ then debug()) duplicates log lines --
        confirm whether handlers should be replaced instead.
        """
        log = logging.getLogger(logger_name)
        log.setLevel(level)

        ch = logging.StreamHandler(None)
        ch.setLevel(level)

        # INFO output is terse; DEBUG carries timestamps and levels.
        if level == logging.INFO:
            formatter = logging.Formatter(InfoFmtString)
        else:
            formatter = logging.Formatter(DebugFmtString)

        ch.setFormatter(formatter)
        log.addHandler(ch)

    def _create_event_sources(self):
        # One event source object per configured ARN; the service name
        # embedded in the ARN selects the implementation class.
        for event_source_cfg in self.config['lambda']['event_sources']:
            _, _, svc, _ = event_source_cfg['arn'].split(':', 3)
            if svc == 'kinesis':
                self.event_sources.append(
                    kappa.event_source.KinesisEventSource(
                        self, event_source_cfg))
            elif svc == 's3':
                self.event_sources.append(kappa.event_source.S3EventSource(
                    self, event_source_cfg))
            else:
                msg = 'Unsupported event source: %s' % event_source_cfg['arn']
                raise ValueError(msg)

    def add_event_sources(self):
        """Attach every configured event source to the function."""
        for event_source in self.event_sources:
            event_source.add(self.function)

    # Backwards-compatible alias: the command line front end invokes
    # ``add_event_source`` (singular).
    add_event_source = add_event_sources

    def deploy(self):
        """Create or update the stack, then upload the function code."""
        if self._stack.exists():
            self._stack.update()
        else:
            self._stack.create()
        self.function.upload()

    def test(self):
        """Invoke the function asynchronously with the test data."""
        self.function.test()

    def tail(self):
        """Return the latest events from the function's log group."""
        return self.function.tail()

    def delete(self):
        """Tear down both the stack and the Lambda function."""
        self._stack.delete()
        self.function.delete()
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import logging
15 +
16 +import kappa.aws
17 +
18 +LOG = logging.getLogger(__name__)
19 +
20 +
class EventSource(object):
    """Common state shared by all event source implementations."""

    def __init__(self, context, config):
        self._context = context
        self._config = config

    @property
    def arn(self):
        # Full ARN of the resource that generates events.
        return self._config['arn']

    @property
    def batch_size(self):
        # Records handed to the function per invocation; 100 by default.
        return self._config.get('batch_size', 100)
34 +
35 +
class KinesisEventSource(EventSource):
    """Wires a Kinesis stream up to a Lambda function."""

    def __init__(self, context, config):
        super(KinesisEventSource, self).__init__(context, config)
        aws = kappa.aws.get_aws(context)
        self._lambda = aws.create_client('lambda')

    def _get_uuid(self, function):
        """Return the UUID of this event source mapping, or None."""
        uuid = None
        response = self._lambda.list_event_sources(
            FunctionName=function.name,
            EventSourceArn=self.arn)
        LOG.debug(response)
        if len(response['EventSources']) > 0:
            uuid = response['EventSources'][0]['UUID']
        return uuid

    def add(self, function):
        """Register the stream as an event source for ``function``."""
        try:
            response = self._lambda.add_event_source(
                FunctionName=function.name,
                # Fixed: the invoke role lives on the context; ``self``
                # has no ``invoke_role_arn`` attribute.
                Role=self._context.invoke_role_arn,
                EventSource=self.arn,
                BatchSize=self.batch_size)
            LOG.debug(response)
        except Exception:
            LOG.exception('Unable to add Kinesis event source')

    def remove(self, function):
        """Delete this event source mapping; returns the API response."""
        response = None
        uuid = self._get_uuid(function)
        if uuid:
            response = self._lambda.remove_event_source(
                UUID=uuid)
            LOG.debug(response)
        return response
72 +
73 +
class S3EventSource(EventSource):
    """Wires S3 bucket notifications up to a Lambda function."""

    def __init__(self, context, config):
        super(S3EventSource, self).__init__(context, config)
        aws = kappa.aws.get_aws(context)
        self._s3 = aws.create_client('s3')

    def _make_notification_id(self, function_name):
        return 'Kappa-%s-notification' % function_name

    def _get_bucket_name(self):
        # The bucket name is the final component of the S3 ARN.
        return self.arn.split(':')[-1]

    def add(self, function):
        """Install a bucket notification that invokes ``function``."""
        notification_spec = {
            'CloudFunctionConfiguration': {
                'Id': self._make_notification_id(function.name),
                # Fixed: the config dict is stored as ``_config``;
                # ``self.config`` does not exist on EventSource.
                'Events': [e for e in self._config['events']],
                # Fixed: Function.arn is a property, not a method.
                'CloudFunction': function.arn,
                # Fixed: the invoke role lives on the context.
                'InvocationRole': self._context.invoke_role_arn}}
        try:
            response = self._s3.put_bucket_notification(
                Bucket=self._get_bucket_name(),
                NotificationConfiguration=notification_spec)
            LOG.debug(response)
        except Exception:
            LOG.exception('Unable to add S3 event source')

    def remove(self, function):
        # NOTE(review): this only *reads* the current notification
        # configuration; it never removes it -- confirm intended.
        response = self._s3.get_bucket_notification(
            Bucket=self._get_bucket_name())
        LOG.debug(response)
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import logging
15 +import os
16 +import zipfile
17 +
18 +import kappa.aws
19 +import kappa.log
20 +
21 +LOG = logging.getLogger(__name__)
22 +
23 +
class Function(object):
    """The AWS Lambda function described by the ``lambda`` config section.

    Knows how to package its own code into a zip file, upload itself,
    invoke itself with test data, tail its CloudWatch log, and delete
    itself.
    """

    def __init__(self, context, config):
        self._context = context
        self._config = config
        aws = kappa.aws.get_aws(context)
        self._lambda_svc = aws.create_client('lambda')
        self._arn = None   # cached ARN, resolved lazily
        self._log = None   # cached Log wrapper, created lazily

    def _setting(self, key):
        # Every simple attribute is read straight out of the config map.
        return self._config[key]

    @property
    def name(self):
        return self._setting('name')

    @property
    def runtime(self):
        return self._setting('runtime')

    @property
    def handler(self):
        return self._setting('handler')

    @property
    def mode(self):
        return self._setting('mode')

    @property
    def description(self):
        return self._setting('description')

    @property
    def timeout(self):
        return self._setting('timeout')

    @property
    def memory_size(self):
        return self._setting('memory_size')

    @property
    def zipfile_name(self):
        return self._setting('zipfile_name')

    @property
    def path(self):
        return self._setting('path')

    @property
    def test_data(self):
        return self._setting('test_data')

    @property
    def arn(self):
        """The function's ARN, looked up once and then cached."""
        if self._arn is None:
            try:
                response = self._lambda_svc.get_function_configuration(
                    FunctionName=self.name)
                LOG.debug(response)
                self._arn = response['FunctionARN']
            except Exception:
                LOG.debug('Unable to find ARN for function: %s' % self.name)
        return self._arn

    @property
    def log(self):
        """A Log wrapper bound to this function's CloudWatch log group."""
        if self._log is None:
            log_group_name = '/aws/lambda/%s' % self.name
            self._log = kappa.log.Log(self._context, log_group_name)
        return self._log

    def tail(self):
        """Return the latest events from the function's log group."""
        LOG.debug('tailing function: %s', self.name)
        return self.log.tail()

    def _zip_lambda_dir(self, zipfile_name, lambda_dir):
        # Archive a whole directory tree, preserving relative paths.
        LOG.debug('_zip_lambda_dir: lambda_dir=%s', lambda_dir)
        LOG.debug('zipfile_name=%s', zipfile_name)
        base = os.path.abspath(lambda_dir)
        with zipfile.ZipFile(zipfile_name, 'w') as archive:
            for dirpath, _dirnames, filenames in os.walk(lambda_dir):
                archive.write(dirpath, os.path.relpath(dirpath, base))
                for entry in filenames:
                    full_path = os.path.join(dirpath, entry)
                    if os.path.isfile(full_path):
                        member = os.path.join(
                            os.path.relpath(dirpath, base), entry)
                        archive.write(full_path, member)

    def _zip_lambda_file(self, zipfile_name, lambda_file):
        # Archive a single source file.
        LOG.debug('_zip_lambda_file: lambda_file=%s', lambda_file)
        LOG.debug('zipfile_name=%s', zipfile_name)
        with zipfile.ZipFile(zipfile_name, 'w') as archive:
            archive.write(lambda_file)

    def zip_lambda_function(self, zipfile_name, lambda_fn):
        """Zip ``lambda_fn`` -- either a directory or a single file."""
        if os.path.isdir(lambda_fn):
            self._zip_lambda_dir(zipfile_name, lambda_fn)
        else:
            self._zip_lambda_file(zipfile_name, lambda_fn)

    def upload(self):
        """Package the code and upload it to AWS Lambda."""
        LOG.debug('uploading %s', self.zipfile_name)
        self.zip_lambda_function(self.zipfile_name, self.path)
        with open(self.zipfile_name, 'rb') as fp:
            exec_role = self._context.exec_role_arn
            try:
                response = self._lambda_svc.upload_function(
                    FunctionName=self.name,
                    FunctionZip=fp,
                    Runtime=self.runtime,
                    Role=exec_role,
                    Handler=self.handler,
                    Mode=self.mode,
                    Description=self.description,
                    Timeout=self.timeout,
                    MemorySize=self.memory_size)
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to upload zip file')

    def delete(self):
        """Delete the function from AWS Lambda; returns the response."""
        LOG.debug('deleting function %s', self.name)
        response = self._lambda_svc.delete_function(FunctionName=self.name)
        LOG.debug(response)
        return response

    def invoke_asynch(self, data_file):
        """Invoke the function asynchronously with ``data_file``."""
        LOG.debug('_invoke_async %s', data_file)
        with open(data_file) as fp:
            response = self._lambda_svc.invoke_async(
                FunctionName=self.name,
                InvokeArgs=fp)
            LOG.debug(response)

    def test(self):
        """Invoke the function with the configured test data."""
        self.invoke_asynch(self.test_data)
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import logging
15 +
16 +LOG = logging.getLogger(__name__)
17 +
18 +import kappa.aws
19 +
20 +
class Log(object):
    """Thin wrapper over CloudWatch Logs for one log group."""

    def __init__(self, context, log_group_name):
        self._context = context
        self.log_group_name = log_group_name
        aws = kappa.aws.get_aws(self._context)
        self._log_svc = aws.create_client('logs')

    def streams(self):
        """Return the list of log streams in this log group."""
        LOG.debug('getting streams for log group: %s', self.log_group_name)
        response = self._log_svc.describe_log_streams(
            logGroupName=self.log_group_name)
        LOG.debug(response)
        return response['logStreams']

    def tail(self):
        """Return the events of the most recently written log stream."""
        LOG.debug('tailing log group: %s', self.log_group_name)
        streams = self.streams()
        # Fixed: guard against an empty log group; previously this fell
        # through with latest_stream=None and raised a TypeError.
        if not streams:
            return []
        # The streams are not returned in time order, so scan for the
        # one with the newest event.
        latest_stream = streams[0]
        for stream in streams[1:]:
            if stream['lastEventTimestamp'] > latest_stream['lastEventTimestamp']:
                latest_stream = stream
        response = self._log_svc.get_log_events(
            logGroupName=self.log_group_name,
            logStreamName=latest_stream['logStreamName'])
        LOG.debug(response)
        return response['events']
1 +# Copyright (c) 2014 Mitch Garnaat http://garnaat.org/
2 +#
3 +# Licensed under the Apache License, Version 2.0 (the "License"). You
4 +# may not use this file except in compliance with the License. A copy of
5 +# the License is located at
6 +#
7 +# http://aws.amazon.com/apache2.0/
8 +#
9 +# or in the "license" file accompanying this file. This file is
10 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 +# ANY KIND, either express or implied. See the License for the specific
12 +# language governing permissions and limitations under the License.
13 +
14 +import logging
15 +import time
16 +
17 +import kappa.aws
18 +
19 +LOG = logging.getLogger(__name__)
20 +
21 +
class Stack(object):
    """Manages the CloudFormation stack that holds kappa's IAM roles."""

    # Stack states treated as "operation finished successfully".
    completed_states = ('CREATE_COMPLETE', 'UPDATE_COMPLETE')

    def __init__(self, context, config):
        self._context = context
        self._config = config
        aws = kappa.aws.get_aws(self._context)
        self._cfn = aws.create_client('cloudformation')
        self._iam = aws.create_client('iam')

    @property
    def name(self):
        return self._config['stack_name']

    @property
    def template_path(self):
        return self._config['template']

    @property
    def exec_role(self):
        return self._config['exec_role']

    @property
    def exec_role_arn(self):
        return self._get_role_arn(self.exec_role)

    @property
    def invoke_role(self):
        return self._config['invoke_role']

    @property
    def invoke_role_arn(self):
        return self._get_role_arn(self.invoke_role)

    def _get_role_arn(self, role_name):
        """Map a logical role name in the stack to the role's ARN."""
        role_arn = None
        try:
            resources = self._cfn.list_stack_resources(
                StackName=self.name)
        except Exception:
            LOG.exception('Unable to find role ARN: %s', role_name)
            # Fixed: previously fell through and raised
            # UnboundLocalError on ``resources``.
            return None
        for resource in resources['StackResourceSummaries']:
            if resource['LogicalResourceId'] == role_name:
                role = self._iam.get_role(
                    RoleName=resource['PhysicalResourceId'])
                LOG.debug(role)
                role_arn = role['Role']['Arn']
                LOG.debug('role_arn: %s', role_arn)
        return role_arn

    def exists(self):
        """
        Does Cloudformation Stack already exist?
        """
        try:
            response = self._cfn.describe_stacks(StackName=self.name)
            LOG.debug('Stack %s exists', self.name)
        except Exception:
            LOG.debug('Stack %s does not exist', self.name)
            response = None
        return response

    def wait(self):
        """Poll once a second until the stack reaches a completed state.

        NOTE(review): failure/rollback states are not detected, so a
        failed stack operation loops forever -- confirm intended.
        """
        done = False
        while not done:
            time.sleep(1)
            response = self._cfn.describe_stacks(StackName=self.name)
            status = response['Stacks'][0]['StackStatus']
            LOG.debug('Stack status is: %s', status)
            if status in self.completed_states:
                done = True

    def create(self):
        """Create the stack from the configured template and wait."""
        LOG.debug('create_stack: stack_name=%s', self.name)
        # Fixed: close the template file instead of leaking the handle.
        with open(self.template_path) as fp:
            template_body = fp.read()
        try:
            self._cfn.create_stack(
                StackName=self.name, TemplateBody=template_body,
                Capabilities=['CAPABILITY_IAM'])
        except Exception:
            LOG.exception('Unable to create stack')
        self.wait()

    def update(self):
        """Update the stack from the configured template and wait."""
        # Fixed: log message previously said 'create_stack'.
        LOG.debug('update_stack: stack_name=%s', self.name)
        with open(self.template_path) as fp:
            template_body = fp.read()
        try:
            self._cfn.update_stack(
                StackName=self.name, TemplateBody=template_body,
                Capabilities=['CAPABILITY_IAM'])
        except Exception as e:
            # Fixed: ``except Exception, e`` is Python-2-only syntax;
            # ``as`` works on Python 2.6+ and Python 3.
            if 'ValidationError' in str(e):
                LOG.info('No Updates Required')
            else:
                LOG.exception('Unable to update stack')
        self.wait()

    def delete(self):
        """Delete the CloudFormation stack."""
        LOG.debug('delete_stack: stack_name=%s', self.name)
        try:
            self._cfn.delete_stack(StackName=self.name)
        except Exception:
            LOG.exception('Unable to delete stack: %s', self.name)
1 -botocore==0.75.0 1 +botocore==0.80.0
2 click==3.3 2 click==3.3
3 PyYAML>=3.11 3 PyYAML>=3.11
4 nose==1.3.1 4 nose==1.3.1
......
...@@ -16,6 +16,8 @@ lambda: ...@@ -16,6 +16,8 @@ lambda:
16 memory_size: 128 16 memory_size: 128
17 timeout: 3 17 timeout: 3
18 mode: event 18 mode: event
19 - event_source: arn:aws:kinesis:us-east-1:084307701560:stream/lambdastream 19 + event_sources:
20 + -
21 + arn: arn:aws:kinesis:us-east-1:084307701560:stream/lambdastream
20 test_data: input.json 22 test_data: input.json
21 23
...\ No newline at end of file ...\ No newline at end of file
......
...@@ -17,6 +17,8 @@ lambda: ...@@ -17,6 +17,8 @@ lambda:
17 timeout: 3 17 timeout: 3
18 mode: event 18 mode: event
19 test_data: input.json 19 test_data: input.json
20 - event_source: arn:aws:s3:::sourcebucket
21 - s3_events:
22 - - s3:ObjectCreated:*
...\ No newline at end of file ...\ No newline at end of file
20 + event_sources:
21 + -
22 + arn: arn:aws:s3:::test-1245812163
23 + events:
24 + - s3:ObjectCreated:*
......
...@@ -5,7 +5,7 @@ from setuptools import setup, find_packages ...@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
5 import os 5 import os
6 6
7 requires = [ 7 requires = [
8 - 'botocore==0.75.0', 8 + 'botocore==0.80.0',
9 'click==3.3', 9 'click==3.3',
10 'PyYAML>=3.11' 10 'PyYAML>=3.11'
11 ] 11 ]
......