diff -Nru python-boto-2.34.0/bin/glacier python-boto-2.38.0/bin/glacier
--- python-boto-2.34.0/bin/glacier 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/bin/glacier 2015-04-09 18:57:51.000000000 +0000
@@ -111,9 +111,12 @@
     glacier_vault = layer2.get_vault(vault_name)
     for filename in filenames:
         if isfile(filename):
-            print('Uploading %s to %s' % (filename, vault_name))
-            glacier_vault.upload_archive(filename, description = basename(filename))
-
+            sys.stdout.write('Uploading %s to %s...' % (filename, vault_name))
+            sys.stdout.flush()
+            archive_id = glacier_vault.upload_archive(
+                filename,
+                description = basename(filename))
+            print(' done. Vault returned ArchiveID %s' % archive_id)
 
 def main():
     if len(sys.argv) < 2:
diff -Nru python-boto-2.34.0/boto/auth.py python-boto-2.38.0/boto/auth.py
--- python-boto-2.34.0/boto/auth.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/auth.py 2015-04-09 18:57:51.000000000 +0000
@@ -51,6 +51,16 @@
 sha256 = None
 
 
+# Region detection strings to determine if SigV4 should be used
+# by default.
+SIGV4_DETECT = [
+    '.cn-',
+    # In eu-central we support both host styles for S3
+    '.eu-central',
+    '-eu-central',
+]
+
+
 class HmacKeys(object):
     """Key based Auth handler helper."""
 
@@ -311,6 +321,8 @@
         in the StringToSign.
         """
         host_header_value = self.host_header(self.host, http_request)
+        if http_request.headers.get('Host'):
+            host_header_value = http_request.headers['Host']
         headers_to_sign = {'Host': host_header_value}
         for name, value in http_request.headers.items():
             lname = name.lower()
@@ -359,7 +371,7 @@
 
         for header in headers_to_sign:
             c_name = header.lower().strip()
-            raw_value = headers_to_sign[header]
+            raw_value = str(headers_to_sign[header])
             if '"' in raw_value:
                 c_value = raw_value.strip()
             else:
@@ -761,20 +773,22 @@
                               urllib.parse.urlencode(req.params))
 
 
-class QueryAuthHandler(AuthHandler):
+class STSAnonHandler(AuthHandler):
     """
     Provides pure query construction (no actual signing).
-    Mostly useful for STS' ``assume_role_with_web_identity``.
-
-    Does **NOT** escape query string values!
+    Used for making anonymous STS request for operations like
+    ``assume_role_with_web_identity``.
     """
-    capability = ['pure-query']
+    capability = ['sts-anon']
 
     def _escape_value(self, value):
-        # Would normally be ``return urllib.parse.quote(value)``.
-        return value
+        # This is changed from a previous version because this string is
+        # being passed to the query string and query strings must
+        # be url encoded. In particular STS requires the saml_response to
+        # be urlencoded when calling assume_role_with_saml.
+        return urllib.parse.quote(value)
 
     def _build_query_string(self, params):
         keys = list(params.keys())
@@ -790,13 +804,11 @@
         qs = self._build_query_string(
             http_request.params
         )
-        boto.log.debug('query_string: %s' % qs)
-        headers['Content-Type'] = 'application/json; charset=UTF-8'
-        http_request.body = ''
-        # if this is a retried request, the qs from the previous try will
-        # already be there, we need to get rid of that and rebuild it
-        http_request.path = http_request.path.split('?')[0]
-        http_request.path = http_request.path + '?' + qs
+        boto.log.debug('query_string in body: %s' % qs)
+        headers['Content-Type'] = 'application/x-www-form-urlencoded'
+        # This will be a POST so the query string should go into the body
+        # as opposed to being in the uri
+        http_request.body = qs
 
 
 class QuerySignatureHelper(HmacKeys):
@@ -1000,9 +1012,9 @@
             # ``boto/iam/connection.py``, as several things there are also
             # endpoint-related.
             if getattr(self.region, 'endpoint', ''):
-                if '.cn-' in self.region.endpoint or \
-                        '.eu-central' in self.region.endpoint:
-                    return ['hmac-v4']
+                for test in SIGV4_DETECT:
+                    if test in self.region.endpoint:
+                        return ['hmac-v4']
 
         return func(self)
     return _wrapper
@@ -1020,8 +1032,9 @@
         # If you're making changes here, you should also check
         # ``boto/iam/connection.py``, as several things there are also
         # endpoint-related.
-        if '.cn-' in self.host or '.eu-central' in self.host:
-            return ['hmac-v4-s3']
+        for test in SIGV4_DETECT:
+            if test in self.host:
+                return ['hmac-v4-s3']
 
         return func(self)
     return _wrapper
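The effect of the refactoring above is that any endpoint hostname containing one of the SIGV4_DETECT substrings is switched to SigV4 signing. A minimal standalone sketch of the detection logic (the helper name and sample endpoints are illustrative only, not part of the patch):

    SIGV4_DETECT = ['.cn-', '.eu-central', '-eu-central']

    def needs_sigv4(endpoint):
        # Mirrors the loop used by detect_potential_sigv4 and
        # detect_potential_s3sigv4 above.
        return any(test in endpoint for test in SIGV4_DETECT)

    assert needs_sigv4('ec2.cn-north-1.amazonaws.com.cn')
    assert needs_sigv4('s3.eu-central-1.amazonaws.com')    # dot host style
    assert needs_sigv4('s3-eu-central-1.amazonaws.com')    # dash host style
    assert not needs_sigv4('ec2.us-east-1.amazonaws.com')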
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Lambda service. + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return get_regions('awslambda', + connection_cls=AWSLambdaConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/awslambda/layer1.py python-boto-2.38.0/boto/awslambda/layer1.py --- python-boto-2.34.0/boto/awslambda/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/awslambda/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,517 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os + +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.awslambda import exceptions + + +class AWSLambdaConnection(AWSAuthConnection): + """ + AWS Lambda + **Overview** + + This is the AWS Lambda API Reference. The AWS Lambda Developer + Guide provides additional information. For the service overview, + go to `What is AWS Lambda`_, and for information about how the + service works, go to `AWS LambdaL How it Works`_ in the AWS Lambda + Developer Guide. 
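A minimal usage sketch for the new boto.awslambda package, assuming credentials come from the usual boto configuration; the function name and payload below are hypothetical:

    import json
    import boto.awslambda

    # connect_to_region() returns None for unknown region names.
    conn = boto.awslambda.connect_to_region('us-east-1')

    # Responses are plain dicts decoded from the service's JSON.
    print(conn.list_functions())
    conn.invoke_async('my-function', json.dumps({'answer': 42}))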
+ """ + APIVersion = "2014-11-11" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "lambda.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidRequestContentException": exceptions.InvalidRequestContentException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InvalidParameterValueException": exceptions.InvalidParameterValueException, + "ServiceException": exceptions.ServiceException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(AWSLambdaConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_event_source(self, event_source, function_name, role, + batch_size=None, parameters=None): + """ + Identifies an Amazon Kinesis stream as the event source for an + AWS Lambda function. AWS Lambda invokes the specified function + when records are posted to the stream. + + This is the pull model, where AWS Lambda invokes the function. + For more information, go to `AWS LambdaL How it Works`_ in the + AWS Lambda Developer Guide. + + This association between an Amazon Kinesis stream and an AWS + Lambda function is called the event source mapping. You + provide the configuration information (for example, which + stream to read from and which AWS Lambda function to invoke) + for the event source mapping in the request body. + + This operation requires permission for the `iam:PassRole` + action for the IAM role. It also requires permission for the + `lambda:AddEventSource` action. + + :type event_source: string + :param event_source: The Amazon Resource Name (ARN) of the Amazon + Kinesis stream that is the event source. Any record added to this + stream causes AWS Lambda to invoke your Lambda function. AWS Lambda + POSTs the Amazon Kinesis event, containing records, to your Lambda + function as JSON. + + :type function_name: string + :param function_name: The Lambda function to invoke when AWS Lambda + detects an event on the stream. + + :type role: string + :param role: The ARN of the IAM role (invocation role) that AWS Lambda + can assume to read from the stream and invoke the function. + + :type batch_size: integer + :param batch_size: The largest number of records that AWS Lambda will + give to your function in a single event. The default is 100 + records. + + :type parameters: map + :param parameters: A map (key-value pairs) defining the configuration + for AWS Lambda to use when reading the event source. Currently, AWS + Lambda supports only the `InitialPositionInStream` key. The valid + values are: "TRIM_HORIZON" and "LATEST". The default value is + "TRIM_HORIZON". For more information, go to `ShardIteratorType`_ in + the Amazon Kinesis Service API Reference. + + """ + + uri = '/2014-11-13/event-source-mappings/' + params = { + 'EventSource': event_source, + 'FunctionName': function_name, + 'Role': role, + } + headers = {} + query_params = {} + if batch_size is not None: + params['BatchSize'] = batch_size + if parameters is not None: + params['Parameters'] = parameters + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def delete_function(self, function_name): + """ + Deletes the specified Lambda function code and configuration. 
+ + This operation requires permission for the + `lambda:DeleteFunction` action. + + :type function_name: string + :param function_name: The Lambda function to delete. + + """ + + uri = '/2014-11-13/functions/{0}'.format(function_name) + return self.make_request('DELETE', uri, expected_status=204) + + def get_event_source(self, uuid): + """ + Returns configuration information for the specified event + source mapping (see AddEventSource). + + This operation requires permission for the + `lambda:GetEventSource` action. + + :type uuid: string + :param uuid: The AWS Lambda assigned ID of the event source mapping. + + """ + + uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid) + return self.make_request('GET', uri, expected_status=200) + + def get_function(self, function_name): + """ + Returns the configuration information of the Lambda function + and a presigned URL link to the .zip file you uploaded with + UploadFunction so you can download the .zip file. Note that + the URL is valid for up to 10 minutes. The configuration + information is the same information you provided as parameters + when uploading the function. + + This operation requires permission for the + `lambda:GetFunction` action. + + :type function_name: string + :param function_name: The Lambda function name. + + """ + + uri = '/2014-11-13/functions/{0}'.format(function_name) + return self.make_request('GET', uri, expected_status=200) + + def get_function_configuration(self, function_name): + """ + Returns the configuration information of the Lambda function. + This the same information you provided as parameters when + uploading the function by using UploadFunction. + + This operation requires permission for the + `lambda:GetFunctionConfiguration` operation. + + :type function_name: string + :param function_name: The name of the Lambda function for which you + want to retrieve the configuration information. + + """ + + uri = '/2014-11-13/functions/{0}/configuration'.format(function_name) + return self.make_request('GET', uri, expected_status=200) + + def invoke_async(self, function_name, invoke_args): + """ + Submits an invocation request to AWS Lambda. Upon receiving + the request, Lambda executes the specified function + asynchronously. To see the logs generated by the Lambda + function execution, see the CloudWatch logs console. + + This operation requires permission for the + `lambda:InvokeAsync` action. + + :type function_name: string + :param function_name: The Lambda function name. + + :type invoke_args: blob + :param invoke_args: JSON that you want to provide to your Lambda + function as input. + + """ + uri = '/2014-11-13/functions/{0}/invoke-async/'.format(function_name) + headers = {} + query_params = {} + try: + content_length = str(len(invoke_args)) + except (TypeError, AttributeError): + # If a file like object is provided and seekable, try to retrieve + # the file size via fstat. + try: + invoke_args.tell() + except (AttributeError, OSError, IOError): + raise TypeError( + "File-like object passed to parameter " + "``invoke_args`` must be seekable." + ) + content_length = str(os.fstat(invoke_args.fileno()).st_size) + headers['Content-Length'] = content_length + return self.make_request('POST', uri, expected_status=202, + data=invoke_args, headers=headers, + params=query_params) + + def list_event_sources(self, event_source_arn=None, function_name=None, + marker=None, max_items=None): + """ + Returns a list of event source mappings. 
For each mapping, the + API returns configuration information (see AddEventSource). + You can optionally specify filters to retrieve specific event + source mappings. + + This operation requires permission for the + `lambda:ListEventSources` action. + + :type event_source_arn: string + :param event_source_arn: The Amazon Resource Name (ARN) of the Amazon + Kinesis stream. + + :type function_name: string + :param function_name: The name of the AWS Lambda function. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListEventSources` operation. If present, specifies + to continue the list from where the returning call left off. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of + event sources to return in response. This value must be greater + than 0. + + """ + + uri = '/2014-11-13/event-source-mappings/' + params = {} + headers = {} + query_params = {} + if event_source_arn is not None: + query_params['EventSource'] = event_source_arn + if function_name is not None: + query_params['FunctionName'] = function_name + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_functions(self, marker=None, max_items=None): + """ + Returns a list of your Lambda functions. For each function, + the response includes the function configuration information. + You must use GetFunction to retrieve the code for your + function. + + This operation requires permission for the + `lambda:ListFunctions` action. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListFunctions` operation. If present, indicates + where to continue the listing. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of AWS + Lambda functions to return in response. This parameter value must + be greater than 0. + + """ + + uri = '/2014-11-13/functions/' + params = {} + headers = {} + query_params = {} + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def remove_event_source(self, uuid): + """ + Removes an event source mapping. This means AWS Lambda will no + longer invoke the function for events in the associated + source. + + This operation requires permission for the + `lambda:RemoveEventSource` action. + + :type uuid: string + :param uuid: The event source mapping ID. + + """ + + uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid) + return self.make_request('DELETE', uri, expected_status=204) + + def update_function_configuration(self, function_name, role=None, + handler=None, description=None, + timeout=None, memory_size=None): + """ + Updates the configuration parameters for the specified Lambda + function by using the values provided in the request. You + provide only the parameters you want to change. This operation + must only be used on an existing Lambda function and cannot be + used to update the function's code. + + This operation requires permission for the + `lambda:UpdateFunctionConfiguration` action. + + :type function_name: string + :param function_name: The name of the Lambda function. 
+
+        :type role: string
+        :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+            will assume when it executes your function.
+
+        :type handler: string
+        :param handler: The function that Lambda calls to begin executing your
+            function. For Node.js, it is the module-name.export value in your
+            function.
+
+        :type description: string
+        :param description: A short user-defined function description. Lambda
+            does not use this value. Assign a meaningful description as you see
+            fit.
+
+        :type timeout: integer
+        :param timeout: The function execution time at which Lambda should
+            terminate the function. Because the execution time has cost
+            implications, we recommend you set this value based on your
+            expected execution time. The default is 3 seconds.
+
+        :type memory_size: integer
+        :param memory_size: The amount of memory, in MB, your Lambda function
+            is given. Lambda uses this memory size to infer the amount of CPU
+            allocated to your function. Your function use-case determines your
+            CPU and memory requirements. For example, a database operation
+            might need less memory compared to an image processing function.
+            The default value is 128 MB. The value must be a multiple of 64 MB.
+
+        """
+
+        uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
+        params = {}
+        headers = {}
+        query_params = {}
+        if role is not None:
+            query_params['Role'] = role
+        if handler is not None:
+            query_params['Handler'] = handler
+        if description is not None:
+            query_params['Description'] = description
+        if timeout is not None:
+            query_params['Timeout'] = timeout
+        if memory_size is not None:
+            query_params['MemorySize'] = memory_size
+        return self.make_request('PUT', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def upload_function(self, function_name, function_zip, runtime, role,
+                        handler, mode, description=None, timeout=None,
+                        memory_size=None):
+        """
+        Creates a new Lambda function or updates an existing function.
+        The function metadata is created from the request parameters,
+        and the code for the function is provided by a .zip file in
+        the request body. If the function name already exists, the
+        existing Lambda function is updated with the new code and
+        metadata.
+
+        This operation requires permission for the
+        `lambda:UploadFunction` action.
+
+        :type function_name: string
+        :param function_name: The name you want to assign to the function you
+            are uploading. The function names appear in the console and are
+            returned in the ListFunctions API. Function names are used to
+            specify functions to other AWS Lambda APIs, such as InvokeAsync.
+
+        :type function_zip: blob
+        :param function_zip: A .zip file containing your packaged source code.
+            For more information about creating a .zip file, go to `AWS Lambda:
+            How it Works`_ in the AWS Lambda Developer Guide.
+
+        :type runtime: string
+        :param runtime: The runtime environment for the Lambda function you are
+            uploading. Currently, Lambda supports only "nodejs" as the runtime.
+
+        :type role: string
+        :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+            assumes when it executes your function to access any other Amazon
+            Web Services (AWS) resources.
+
+        :type handler: string
+        :param handler: The function that Lambda calls to begin execution. For
+            Node.js, it is the module-name.export value in your function.
+
+        :type mode: string
+        :param mode: How the Lambda function will be invoked. Lambda supports
+            only the "event" mode.
+
+        :type description: string
+        :param description: A short, user-defined function description. Lambda
+            does not use this value. Assign a meaningful description as you see
+            fit.
+
+        :type timeout: integer
+        :param timeout: The function execution time at which Lambda should
+            terminate the function. Because the execution time has cost
+            implications, we recommend you set this value based on your
+            expected execution time. The default is 3 seconds.
+
+        :type memory_size: integer
+        :param memory_size: The amount of memory, in MB, your Lambda function
+            is given. Lambda uses this memory size to infer the amount of CPU
+            allocated to your function. Your function use-case determines your
+            CPU and memory requirements. For example, a database operation
+            might need less memory compared to an image processing function.
+            The default value is 128 MB. The value must be a multiple of 64 MB.
+
+        """
+        uri = '/2014-11-13/functions/{0}'.format(function_name)
+        headers = {}
+        query_params = {}
+        if runtime is not None:
+            query_params['Runtime'] = runtime
+        if role is not None:
+            query_params['Role'] = role
+        if handler is not None:
+            query_params['Handler'] = handler
+        if mode is not None:
+            query_params['Mode'] = mode
+        if description is not None:
+            query_params['Description'] = description
+        if timeout is not None:
+            query_params['Timeout'] = timeout
+        if memory_size is not None:
+            query_params['MemorySize'] = memory_size
+
+        try:
+            content_length = str(len(function_zip))
+        except (TypeError, AttributeError):
+            # If a file like object is provided and seekable, try to retrieve
+            # the file size via fstat.
+            try:
+                function_zip.tell()
+            except (AttributeError, OSError, IOError):
+                raise TypeError(
+                    "File-like object passed to parameter "
+                    "``function_zip`` must be seekable."
+                )
+            content_length = str(os.fstat(function_zip.fileno()).st_size)
+        headers['Content-Length'] = content_length
+        return self.make_request('PUT', uri, expected_status=201,
+                                 data=function_zip, headers=headers,
+                                 params=query_params)
+
+    def make_request(self, verb, resource, headers=None, data='',
+                     expected_status=None, params=None):
+        if headers is None:
+            headers = {}
+        response = AWSAuthConnection.make_request(
+            self, verb, resource, headers=headers, data=data, params=params)
+        body = response.read().decode('utf-8')
+        if body:
+            body = json.loads(body)
+        if response.status == expected_status:
+            return body
+        else:
+            error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
+            error_class = self._faults.get(error_type, self.ResponseError)
+            raise error_class(response.status, response.reason, body)
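A sketch of uploading function code through this connection (the names and role ARN are hypothetical; a real file object works because upload_function falls back to os.fstat() to compute Content-Length):

    import boto.awslambda

    conn = boto.awslambda.connect_to_region('us-east-1')
    with open('my-function.zip', 'rb') as zip_file:
        # This API version accepts only the "nodejs" runtime and the
        # "event" invocation mode.
        conn.upload_function('my-function', zip_file, 'nodejs',
                             'arn:aws:iam::123456789012:role/lambda-exec',
                             'index.handler', 'event')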
diff -Nru python-boto-2.34.0/boto/cloudformation/connection.py python-boto-2.38.0/boto/cloudformation/connection.py
--- python-boto-2.34.0/boto/cloudformation/connection.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/cloudformation/connection.py 2015-04-09 18:57:51.000000000 +0000
@@ -98,7 +98,8 @@
     def _build_create_or_update_params(self, stack_name, template_body,
             template_url, parameters, disable_rollback, timeout_in_minutes,
             notification_arns, capabilities, on_failure, stack_policy_body,
-            stack_policy_url, tags, stack_policy_during_update_body=None,
+            stack_policy_url, tags, use_previous_template=None,
+            stack_policy_during_update_body=None,
             stack_policy_during_update_url=None):
         """
         Helper that creates JSON parameters needed by a Stack Create or
@@ -117,20 +118,23 @@
         :param template_body: Structure containing the template body.
             (For more information, go to `Template Anatomy`_ in the AWS
             CloudFormation User Guide.)
-            Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
-            passed, only `TemplateBody` is used.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
 
         :type template_url: string
         :param template_url: Location of file containing the template body.
             The URL must point to a template (max size: 307,200 bytes) located
             in an S3 bucket in the same region as the stack. For more
             information, go to the `Template Anatomy`_ in the AWS
             CloudFormation User Guide.
-            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
-            passed, only `TemplateBody` is used.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
 
         :type parameters: list
         :param parameters: A list of key/value tuples that specify input
-            parameters for the stack.
+            parameters for the stack. A 3-tuple (key, value, bool) may be used to
+            specify the `UsePreviousValue` option.
 
         :type disable_rollback: boolean
         :param disable_rollback: Set to `True` to disable rollback of the stack
@@ -186,6 +192,13 @@
             propagated to EC2 resources that are created as part of the
             stack. A maximum number of 10 tags can be specified.
 
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
+
         :type stack_policy_during_update_body: string
         :param stack_policy_during_update_body: Structure containing the
             temporary overriding stack policy body. If you pass
@@ -217,13 +230,23 @@
             params['TemplateBody'] = template_body
         if template_url:
             params['TemplateURL'] = template_url
+        if use_previous_template is not None:
+            params['UsePreviousTemplate'] = self.encode_bool(use_previous_template)
         if template_body and template_url:
             boto.log.warning("If both TemplateBody and TemplateURL are"
                              " specified, only TemplateBody will be honored by the API")
         if parameters and len(parameters) > 0:
-            for i, (key, value) in enumerate(parameters):
+            for i, parameter_tuple in enumerate(parameters):
+                key, value = parameter_tuple[:2]
+                use_previous = (parameter_tuple[2]
+                                if len(parameter_tuple) > 2 else False)
                 params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
-                params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+                if use_previous:
+                    params['Parameters.member.%d.UsePreviousValue'
+                           % (i + 1)] = self.encode_bool(use_previous)
+                else:
+                    params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+
         if capabilities:
             for i, value in enumerate(capabilities):
                 params['Capabilities.member.%d' % (i + 1)] = value
@@ -383,6 +406,7 @@
     def update_stack(self, stack_name, template_body=None, template_url=None,
                      parameters=None, notification_arns=None, disable_rollback=False,
                      timeout_in_minutes=None, capabilities=None, tags=None,
+                     use_previous_template=None,
                      stack_policy_during_update_body=None,
                      stack_policy_during_update_url=None,
                      stack_policy_body=None, stack_policy_url=None):
@@ -421,20 +445,30 @@
         :param template_body: Structure containing the template body.
             (For more information, go to `Template Anatomy`_ in the AWS
             CloudFormation User Guide.)
-            Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
-            passed, only `TemplateBody` is used.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
 
         :type template_url: string
         :param template_url: Location of file containing the template body. The
-            URL must point to a template located in an S3 bucket in the same
-            region as the stack. For more information, go to `Template
-            Anatomy`_ in the AWS CloudFormation User Guide.
-            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
-            passed, only `TemplateBody` is used.
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
 
         :type parameters: list
         :param parameters: A list of key/value tuples that specify input
-            parameters for the stack.
+            parameters for the stack. A 3-tuple (key, value, bool) may be used to
+            specify the `UsePreviousValue` option.
 
         :type notification_arns: list
         :param notification_arns: The Simple Notification Service (SNS) topic
@@ -497,8 +532,8 @@
         params = self._build_create_or_update_params(stack_name, template_body,
             template_url, parameters, disable_rollback, timeout_in_minutes,
             notification_arns, capabilities, None, stack_policy_body,
-            stack_policy_url, tags, stack_policy_during_update_body,
-            stack_policy_during_update_url)
+            stack_policy_url, tags, use_previous_template,
+            stack_policy_during_update_body, stack_policy_during_update_url)
         body = self._do_request('UpdateStack', params, '/', 'POST')
         return body['UpdateStackResponse']['UpdateStackResult']['StackId']
@@ -884,5 +919,4 @@
             params['StackPolicyURL'] = stack_policy_url
         response = self._do_request('SetStackPolicy', params, '/', 'POST')
-        return response['SetStackPolicyResponse']\
-            ['SetStackPolicyResult']
+        return response['SetStackPolicyResponse']
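A sketch of how the new UpdateStack options compose (stack and parameter names are hypothetical): use_previous_template=True reuses the stored template, a 2-tuple sends a new parameter value, and a 3-tuple ending in True sends UsePreviousValue instead of a value:

    import boto.cloudformation

    cfn = boto.cloudformation.connect_to_region('us-east-1')
    cfn.update_stack('my-stack',
                     use_previous_template=True,
                     parameters=[('InstanceType', 'm3.large'),
                                 ('KeyName', None, True)])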
diff -Nru python-boto-2.34.0/boto/cloudhsm/exceptions.py python-boto-2.38.0/boto/cloudhsm/exceptions.py
--- python-boto-2.34.0/boto/cloudhsm/exceptions.py 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/boto/cloudhsm/exceptions.py 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,35 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.exception import BotoServerError
+
+
+class InvalidRequestException(BotoServerError):
+    pass
+
+
+class CloudHsmServiceException(BotoServerError):
+    pass
+
+
+class CloudHsmInternalException(BotoServerError):
+    pass
diff -Nru python-boto-2.34.0/boto/cloudhsm/__init__.py python-boto-2.38.0/boto/cloudhsm/__init__.py
--- python-boto-2.34.0/boto/cloudhsm/__init__.py 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/boto/cloudhsm/__init__.py 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+    """
+    Get all available regions for the AWS CloudHSM service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.cloudhsm.layer1 import CloudHSMConnection
+    return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff -Nru python-boto-2.34.0/boto/cloudhsm/layer1.py python-boto-2.38.0/boto/cloudhsm/layer1.py
--- python-boto-2.34.0/boto/cloudhsm/layer1.py 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/boto/cloudhsm/layer1.py 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,448 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.cloudhsm import exceptions
+
+
+class CloudHSMConnection(AWSQueryConnection):
+    """
+    AWS CloudHSM Service
+    """
+    APIVersion = "2014-05-30"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com"
+    ServiceName = "CloudHSM"
+    TargetPrefix = "CloudHsmFrontendService"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "InvalidRequestException": exceptions.InvalidRequestException,
+        "CloudHsmServiceException": exceptions.CloudHsmServiceException,
+        "CloudHsmInternalException": exceptions.CloudHsmInternalException,
+    }
+
+
+    def __init__(self, **kwargs):
+        region = kwargs.pop('region', None)
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+
+        if 'host' not in kwargs or kwargs['host'] is None:
+            kwargs['host'] = region.endpoint
+
+        super(CloudHSMConnection, self).__init__(**kwargs)
+        self.region = region
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def create_hapg(self, label):
+        """
+        Creates a high-availability partition group. A high-
+        availability partition group is a group of partitions that
+        spans multiple physical HSMs.
+
+        :type label: string
+        :param label: The label of the new high-availability partition group.
+
+        """
+        params = {'Label': label, }
+        return self.make_request(action='CreateHapg',
+                                 body=json.dumps(params))
+
+    def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type,
+                   eni_ip=None, external_id=None, client_token=None,
+                   syslog_ip=None):
+        """
+        Creates an uninitialized HSM instance. Running this command
+        provisions an HSM appliance and will result in charges to your
+        AWS account for the HSM.
+
+        :type subnet_id: string
+        :param subnet_id: The identifier of the subnet in your VPC in which to
+            place the HSM.
+
+        :type ssh_key: string
+        :param ssh_key: The SSH public key to install on the HSM.
+
+        :type eni_ip: string
+        :param eni_ip: The IP address to assign to the HSM's ENI.
+
+        :type iam_role_arn: string
+        :param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM
+            service to allocate an ENI on your behalf.
+
+        :type external_id: string
+        :param external_id: The external ID from **IamRoleArn**, if present.
+
+        :type subscription_type: string
+        :param subscription_type: The subscription type.
+
+        :type client_token: string
+        :param client_token: A user-defined token to ensure idempotence.
+            Subsequent calls to this action with the same token will be
+            ignored.
+
+        :type syslog_ip: string
+        :param syslog_ip: The IP address for the syslog monitoring server.
+
+        """
+        params = {
+            'SubnetId': subnet_id,
+            'SshKey': ssh_key,
+            'IamRoleArn': iam_role_arn,
+            'SubscriptionType': subscription_type,
+        }
+        if eni_ip is not None:
+            params['EniIp'] = eni_ip
+        if external_id is not None:
+            params['ExternalId'] = external_id
+        if client_token is not None:
+            params['ClientToken'] = client_token
+        if syslog_ip is not None:
+            params['SyslogIp'] = syslog_ip
+        return self.make_request(action='CreateHsm',
+                                 body=json.dumps(params))
+
+    def create_luna_client(self, certificate, label=None):
+        """
+        Creates an HSM client.
+
+        :type label: string
+        :param label: The label for the client.
+
+        :type certificate: string
+        :param certificate: The contents of a Base64-Encoded X.509 v3
+            certificate to be installed on the HSMs used by this client.
+
+        """
+        params = {'Certificate': certificate, }
+        if label is not None:
+            params['Label'] = label
+        return self.make_request(action='CreateLunaClient',
+                                 body=json.dumps(params))
+
+    def delete_hapg(self, hapg_arn):
+        """
+        Deletes a high-availability partition group.
+
+        :type hapg_arn: string
+        :param hapg_arn: The ARN of the high-availability partition group to
+            delete.
+
+        """
+        params = {'HapgArn': hapg_arn, }
+        return self.make_request(action='DeleteHapg',
+                                 body=json.dumps(params))
+
+    def delete_hsm(self, hsm_arn):
+        """
+        Deletes an HSM. Once complete, this operation cannot be undone
+        and your key material cannot be recovered.
+
+        :type hsm_arn: string
+        :param hsm_arn: The ARN of the HSM to delete.
+
+        """
+        params = {'HsmArn': hsm_arn, }
+        return self.make_request(action='DeleteHsm',
+                                 body=json.dumps(params))
+
+    def delete_luna_client(self, client_arn):
+        """
+        Deletes a client.
+
+        :type client_arn: string
+        :param client_arn: The ARN of the client to delete.
+
+        """
+        params = {'ClientArn': client_arn, }
+        return self.make_request(action='DeleteLunaClient',
+                                 body=json.dumps(params))
+
+    def describe_hapg(self, hapg_arn):
+        """
+        Retrieves information about a high-availability partition
+        group.
+
+        :type hapg_arn: string
+        :param hapg_arn: The ARN of the high-availability partition group to
+            describe.
+
+        """
+        params = {'HapgArn': hapg_arn, }
+        return self.make_request(action='DescribeHapg',
+                                 body=json.dumps(params))
+
+    def describe_hsm(self, hsm_arn=None, hsm_serial_number=None):
+        """
+        Retrieves information about an HSM. You can identify the HSM
+        by its ARN or its serial number.
+
+        :type hsm_arn: string
+        :param hsm_arn: The ARN of the HSM. Either the HsmArn or the
+            SerialNumber parameter must be specified.
+
+        :type hsm_serial_number: string
+        :param hsm_serial_number: The serial number of the HSM. Either the
+            HsmArn or the HsmSerialNumber parameter must be specified.
+
+        """
+        params = {}
+        if hsm_arn is not None:
+            params['HsmArn'] = hsm_arn
+        if hsm_serial_number is not None:
+            params['HsmSerialNumber'] = hsm_serial_number
+        return self.make_request(action='DescribeHsm',
+                                 body=json.dumps(params))
+
+    def describe_luna_client(self, client_arn=None,
+                             certificate_fingerprint=None):
+        """
+        Retrieves information about an HSM client.
+
+        :type client_arn: string
+        :param client_arn: The ARN of the client.
+
+        :type certificate_fingerprint: string
+        :param certificate_fingerprint: The certificate fingerprint.
+
+        """
+        params = {}
+        if client_arn is not None:
+            params['ClientArn'] = client_arn
+        if certificate_fingerprint is not None:
+            params['CertificateFingerprint'] = certificate_fingerprint
+        return self.make_request(action='DescribeLunaClient',
+                                 body=json.dumps(params))
+
+    def get_config(self, client_arn, client_version, hapg_list):
+        """
+        Gets the configuration files necessary to connect to all high
+        availability partition groups the client is associated with.
+
+        :type client_arn: string
+        :param client_arn: The ARN of the client.
+
+        :type client_version: string
+        :param client_version: The client version.
+
+        :type hapg_list: list
+        :param hapg_list: A list of ARNs that identify the high-availability
+            partition groups that are associated with the client.
+
+        """
+        params = {
+            'ClientArn': client_arn,
+            'ClientVersion': client_version,
+            'HapgList': hapg_list,
+        }
+        return self.make_request(action='GetConfig',
+                                 body=json.dumps(params))
+
+    def list_available_zones(self):
+        """
+        Lists the Availability Zones that have available AWS CloudHSM
+        capacity.
+
+
+        """
+        params = {}
+        return self.make_request(action='ListAvailableZones',
+                                 body=json.dumps(params))
+
+    def list_hapgs(self, next_token=None):
+        """
+        Lists the high-availability partition groups for the account.
+
+        This operation supports pagination with the use of the
+        NextToken member. If more results are available, the NextToken
+        member of the response contains a token that you pass in the
+        next call to ListHapgs to retrieve the next set of items.
+
+        :type next_token: string
+        :param next_token: The NextToken value from a previous call to
+            ListHapgs. Pass null if this is the first call.
+
+        """
+        params = {}
+        if next_token is not None:
+            params['NextToken'] = next_token
+        return self.make_request(action='ListHapgs',
+                                 body=json.dumps(params))
+
+    def list_hsms(self, next_token=None):
+        """
+        Retrieves the identifiers of all of the HSMs provisioned for
+        the current customer.
+
+        This operation supports pagination with the use of the
+        NextToken member. If more results are available, the NextToken
+        member of the response contains a token that you pass in the
+        next call to ListHsms to retrieve the next set of items.
+
+        :type next_token: string
+        :param next_token: The NextToken value from a previous call to
+            ListHsms. Pass null if this is the first call.
+
+        """
+        params = {}
+        if next_token is not None:
+            params['NextToken'] = next_token
+        return self.make_request(action='ListHsms',
+                                 body=json.dumps(params))
+
+    def list_luna_clients(self, next_token=None):
+        """
+        Lists all of the clients.
+
+        This operation supports pagination with the use of the
+        NextToken member. If more results are available, the NextToken
+        member of the response contains a token that you pass in the
+        next call to ListLunaClients to retrieve the next set of
+        items.
+
+        :type next_token: string
+        :param next_token: The NextToken value from a previous call to
+            ListLunaClients. Pass null if this is the first call.
+
+        """
+        params = {}
+        if next_token is not None:
+            params['NextToken'] = next_token
+        return self.make_request(action='ListLunaClients',
+                                 body=json.dumps(params))
+
+    def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
+        """
+        Modifies an existing high-availability partition group.
+
+        :type hapg_arn: string
+        :param hapg_arn: The ARN of the high-availability partition group to
+            modify.
+
+        :type label: string
+        :param label: The new label for the high-availability partition group.
+
+        :type partition_serial_list: list
+        :param partition_serial_list: The list of partition serial numbers to
+            make members of the high-availability partition group.
+
+        """
+        params = {'HapgArn': hapg_arn, }
+        if label is not None:
+            params['Label'] = label
+        if partition_serial_list is not None:
+            params['PartitionSerialList'] = partition_serial_list
+        return self.make_request(action='ModifyHapg',
+                                 body=json.dumps(params))
+
+    def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None,
+                   iam_role_arn=None, external_id=None, syslog_ip=None):
+        """
+        Modifies an HSM.
+
+        :type hsm_arn: string
+        :param hsm_arn: The ARN of the HSM to modify.
+
+        :type subnet_id: string
+        :param subnet_id: The new identifier of the subnet that the HSM is in.
+
+        :type eni_ip: string
+        :param eni_ip: The new IP address for the elastic network interface
+            attached to the HSM.
+
+        :type iam_role_arn: string
+        :param iam_role_arn: The new IAM role ARN.
+
+        :type external_id: string
+        :param external_id: The new external ID.
+
+        :type syslog_ip: string
+        :param syslog_ip: The new IP address for the syslog monitoring server.
+
+        """
+        params = {'HsmArn': hsm_arn, }
+        if subnet_id is not None:
+            params['SubnetId'] = subnet_id
+        if eni_ip is not None:
+            params['EniIp'] = eni_ip
+        if iam_role_arn is not None:
+            params['IamRoleArn'] = iam_role_arn
+        if external_id is not None:
+            params['ExternalId'] = external_id
+        if syslog_ip is not None:
+            params['SyslogIp'] = syslog_ip
+        return self.make_request(action='ModifyHsm',
+                                 body=json.dumps(params))
+
+    def modify_luna_client(self, client_arn, certificate):
+        """
+        Modifies the certificate used by the client.
+
+        This action can potentially start a workflow to install the
+        new certificate on the client's HSMs.
+
+        :type client_arn: string
+        :param client_arn: The ARN of the client.
+
+        :type certificate: string
+        :param certificate: The new certificate for the client.
+
+        """
+        params = {
+            'ClientArn': client_arn,
+            'Certificate': certificate,
+        }
+        return self.make_request(action='ModifyLunaClient',
+                                 body=json.dumps(params))
+
+    def make_request(self, action, body):
+        headers = {
+            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+            'Host': self.region.endpoint,
+            'Content-Type': 'application/x-amz-json-1.1',
+            'Content-Length': str(len(body)),
+        }
+        http_request = self.build_base_http_request(
+            method='POST', path='/', auth_path='/', params={},
+            headers=headers, data=body)
+        response = self._mexe(http_request, sender=None,
+                              override_num_retries=10)
+        response_body = response.read().decode('utf-8')
+        boto.log.debug(response_body)
+        if response.status == 200:
+            if response_body:
+                return json.loads(response_body)
+        else:
+            json_body = json.loads(response_body)
+            fault_name = json_body.get('__type', None)
+            exception_class = self._faults.get(fault_name, self.ResponseError)
+            raise exception_class(response.status, response.reason,
+                                  body=json_body)
+
diff -Nru python-boto-2.34.0/boto/cloudsearch2/document.py python-boto-2.38.0/boto/cloudsearch2/document.py
--- python-boto-2.34.0/boto/cloudsearch2/document.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/cloudsearch2/document.py 2015-04-09 18:57:51.000000000 +0000
@@ -25,6 +25,7 @@
 from boto.compat import json
 import requests
 import boto
+from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
 
 
 class SearchServiceException(Exception):
@@ -93,11 +94,25 @@
         self.documents_batch = []
         self._sdf = None
 
-        # Copy proxy settings from connection
-        if self.domain and self.domain.layer1 and self.domain.layer1.use_proxy:
-            self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
-        else:
-            self.proxy = {}
+        # Copy proxy settings from connection and check if request should be signed
+        self.proxy = {}
+        self.sign_request = False
+        if self.domain and self.domain.layer1:
+            if self.domain.layer1.use_proxy:
+                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
+
+            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
+
+            if self.sign_request:
+                # Create a domain connection to send signed requests
+                layer1 = self.domain.layer1
+                self.domain_connection = CloudSearchDomainConnection(
+                    host=self.endpoint,
+                    aws_access_key_id=layer1.aws_access_key_id,
+                    aws_secret_access_key=layer1.aws_secret_access_key,
+                    region=layer1.region,
+                    provider=layer1.provider
+                )
 
     def add(self, _id, fields):
         """
@@ -164,6 +179,26 @@
 
         self._sdf = key_obj.get_contents_as_string()
 
+    def _commit_with_auth(self, sdf, api_version):
+        return self.domain_connection.upload_documents(sdf, 'application/json')
+
+    def _commit_without_auth(self, sdf, api_version):
+        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
+
+        # Keep-alive is automatic in a post-1.0 requests world.
+        session = requests.Session()
+        session.proxies = self.proxy
+        adapter = requests.adapters.HTTPAdapter(
+            pool_connections=20,
+            pool_maxsize=50,
+            max_retries=5
+        )
+        session.mount('http://', adapter)
+        session.mount('https://', adapter)
+
+        resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
+        return resp
+
     def commit(self):
         """
         Actually send an SDF to CloudSearch for processing
@@ -184,24 +219,15 @@
             boto.log.error(sdf[index - 100:index + 100])
 
         api_version = '2013-01-01'
-        if self.domain:
+        if self.domain and self.domain.layer1:
             api_version = self.domain.layer1.APIVersion
-        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
-        # Keep-alive is automatic in a post-1.0 requests world.
-        session = requests.Session()
-        session.proxies = self.proxy
-        adapter = requests.adapters.HTTPAdapter(
-            pool_connections=20,
-            pool_maxsize=50,
-            max_retries=5
-        )
-        session.mount('http://', adapter)
-        session.mount('https://', adapter)
-        r = session.post(url, data=sdf,
-                         headers={'Content-Type': 'application/json'})
+        if self.sign_request:
+            r = self._commit_with_auth(sdf, api_version)
+        else:
+            r = self._commit_without_auth(sdf, api_version)
 
-        return CommitResponse(r, self, sdf)
+        return CommitResponse(r, self, sdf, signed_request=self.sign_request)
 
 
 class CommitResponse(object):
@@ -219,20 +245,24 @@
     :raises: :class:`boto.cloudsearch2.document.EncodingError`
     :raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
     """
-    def __init__(self, response, doc_service, sdf):
+    def __init__(self, response, doc_service, sdf, signed_request=False):
         self.response = response
         self.doc_service = doc_service
         self.sdf = sdf
+        self.signed_request = signed_request
 
-        _body = response.content.decode('utf-8')
+        if self.signed_request:
+            self.content = response
+        else:
+            _body = response.content.decode('utf-8')
 
-        try:
-            self.content = json.loads(_body)
-        except:
-            boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
-                           '\n\nSDF:\n{1}'.format(_body, self.sdf))
-            raise boto.exception.BotoServerError(self.response.status_code, '',
-                                                 body=_body)
+            try:
+                self.content = json.loads(_body)
+            except:
+                boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
+                               '\n\nSDF:\n{1}'.format(_body, self.sdf))
+                raise boto.exception.BotoServerError(self.response.status_code, '',
+                                                     body=_body)
 
         self.status = self.content['status']
         if self.status == 'error':
@@ -266,7 +296,10 @@
                             if d['type'] == type_])
 
         if response_num != commit_num:
-            boto.log.debug(self.response.content)
+            if self.signed_request:
+                boto.log.debug(self.response)
+            else:
+                boto.log.debug(self.response.content)
             # There will always be a commit mismatch error if there is any
             # errors on cloudsearch. self.errors gets lost when this
             # CommitMismatchError is raised. Whoever is using boto has no idea
diff -Nru python-boto-2.34.0/boto/cloudsearch2/layer1.py python-boto-2.38.0/boto/cloudsearch2/layer1.py
--- python-boto-2.34.0/boto/cloudsearch2/layer1.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/cloudsearch2/layer1.py 2015-04-09 18:57:51.000000000 +0000
@@ -56,7 +56,6 @@
         "BaseException": exceptions.BaseException,
     }
 
-
     def __init__(self, **kwargs):
         region = kwargs.pop('region', None)
         if not region:
@@ -66,6 +65,9 @@
         if 'host' not in kwargs or kwargs['host'] is None:
             kwargs['host'] = region.endpoint
 
+        sign_request = kwargs.pop('sign_request', False)
+        self.sign_request = sign_request
+
         super(CloudSearchConnection, self).__init__(**kwargs)
         self.region = region
diff -Nru python-boto-2.34.0/boto/cloudsearch2/layer2.py python-boto-2.38.0/boto/cloudsearch2/layer2.py
--- python-boto-2.34.0/boto/cloudsearch2/layer2.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/cloudsearch2/layer2.py 2015-04-09 18:57:51.000000000 +0000
@@ -32,7 +32,7 @@
     def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  host=None, debug=0, session_token=None, region=None,
-                 validate_certs=True):
+                 validate_certs=True, sign_request=False):
 
         if isinstance(region, six.string_types):
             import boto.cloudsearch2
@@ -52,7 +52,8 @@
             debug=debug,
             security_token=session_token,
             region=region,
-            validate_certs=validate_certs)
+            validate_certs=validate_certs,
+            sign_request=sign_request)
 
     def list_domains(self, domain_names=None):
         """
diff -Nru python-boto-2.34.0/boto/cloudsearch2/search.py python-boto-2.38.0/boto/cloudsearch2/search.py
--- python-boto-2.34.0/boto/cloudsearch2/search.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/cloudsearch2/search.py 2015-04-09 18:57:51.000000000 +0000
@@ -23,6 +23,7 @@
 from math import ceil
 from boto.compat import json, map, six
 import requests
+from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
 
 SIMPLE = 'simple'
 STRUCTURED = 'structured'
@@ -144,6 +145,62 @@
 
         return params
 
+    def to_domain_connection_params(self):
+        """
+        Transform search parameters from instance properties to a dictionary
+        that CloudSearchDomainConnection can accept
+
+        :rtype: dict
+        :return: search parameters
+        """
+        params = {'start': self.start, 'size': self.real_size}
+
+        if self.q:
+            params['q'] = self.q
+
+        if self.parser:
+            params['query_parser'] = self.parser
+
+        if self.fq:
+            params['filter_query'] = self.fq
+
+        if self.expr:
+            expr = {}
+            for k, v in six.iteritems(self.expr):
+                expr['expr.%s' % k] = v
+
+            params['expr'] = expr
+
+        if self.facet:
+            facet = {}
+            for k, v in six.iteritems(self.facet):
+                if not isinstance(v, six.string_types):
+                    v = json.dumps(v)
+                facet['facet.%s' % k] = v
+
+            params['facet'] = facet
+
+        if self.highlight:
+            highlight = {}
+            for k, v in six.iteritems(self.highlight):
+                highlight['highlight.%s' % k] = v
+
+            params['highlight'] = highlight
+
+        if self.options:
+            params['query_options'] = self.options
+
+        if self.return_fields:
+            params['ret'] = ','.join(self.return_fields)
+
+        if self.partial is not None:
+            params['partial'] = self.partial
+
+        if self.sort:
+            params['sort'] = ','.join(self.sort)
+
+        return params
+
 
 class SearchConnection(object):
 
@@ -152,13 +209,28 @@
         self.endpoint = endpoint
         self.session = requests.Session()
 
-        # Copy proxy settings from connection
-        if self.domain and self.domain.layer1 and self.domain.layer1.use_proxy:
-            self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()
-
# Endpoint needs to be set before initializing CloudSearchDomainConnection if not endpoint: self.endpoint = domain.search_service_endpoint + # Copy proxy settings from connection and check if request should be signed + self.sign_request = False + if self.domain and self.domain.layer1: + if self.domain.layer1.use_proxy: + self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth() + + self.sign_request = getattr(self.domain.layer1, 'sign_request', False) + + if self.sign_request: + layer1 = self.domain.layer1 + self.domain_connection = CloudSearchDomainConnection( + host=self.endpoint, + aws_access_key_id=layer1.aws_access_key_id, + aws_secret_access_key=layer1.aws_secret_access_key, + region=layer1.region, + provider=layer1.provider + ) + def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None, size=10, start=0, facet=None, highlight=None, sort=None, partial=None, options=None): @@ -263,6 +335,15 @@ partial=partial, options=options) return self(query) + def _search_with_auth(self, params): + return self.domain_connection.search(params.pop("q", ""), **params) + + def _search_without_auth(self, params, api_version): + url = "http://%s/%s/search" % (self.endpoint, api_version) + resp = self.session.get(url, params=params) + + return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code} + def __call__(self, query): """Make a call to CloudSearch @@ -273,26 +354,30 @@ :return: search results """ api_version = '2013-01-01' - if self.domain: + if self.domain and self.domain.layer1: api_version = self.domain.layer1.APIVersion - url = "http://%s/%s/search" % (self.endpoint, api_version) - params = query.to_params() - r = self.session.get(url, params=params) - _body = r.content.decode('utf-8') - try: - data = json.loads(_body) - except ValueError: - if r.status_code == 403: - msg = '' - import re - g = re.search('
<html><body><h1>403 Forbidden</h1>
([^<]+)<', _body) - try: - msg = ': %s' % (g.groups()[0].strip()) - except AttributeError: - pass - raise SearchServiceException('Authentication error from Amazon%s' % msg) - raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query) + if self.sign_request: + data = self._search_with_auth(query.to_domain_connection_params()) + else: + r = self._search_without_auth(query.to_params(), api_version) + + _body = r['body'] + _status_code = r['status_code'] + + try: + data = json.loads(_body) + except ValueError: + if _status_code == 403: + msg = '' + import re + g = re.search('
<html><body><h1>403 Forbidden</h1>
([^<]+)<', _body) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query) if 'messages' in data and 'error' in data: for m in data['messages']: diff -Nru python-boto-2.34.0/boto/cloudsearchdomain/exceptions.py python-boto-2.38.0/boto/cloudsearchdomain/exceptions.py --- python-boto-2.34.0/boto/cloudsearchdomain/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/cloudsearchdomain/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,30 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class SearchException(BotoServerError): + pass + + +class DocumentServiceException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/cloudsearchdomain/__init__.py python-boto-2.38.0/boto/cloudsearchdomain/__init__.py --- python-boto-2.34.0/boto/cloudsearchdomain/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/cloudsearchdomain/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
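The net effect of the search.py changes above: when the layer objects are created with sign_request=True, queries are routed through the new CloudSearchDomainConnection and signed with SigV4 instead of being sent as anonymous HTTP GETs. A minimal sketch of that path, assuming a hypothetical domain name and the usual boto credential configuration:

    # Sketch only: 'mydomain' is a placeholder; get_search_service() is
    # the standard cloudsearch2 Domain accessor.
    from boto.cloudsearch2.layer2 import Layer2

    layer2 = Layer2(region='us-east-1', sign_request=True)
    domain = layer2.lookup('mydomain')
    search_service = domain.get_search_service()
    # Routed through CloudSearchDomainConnection and SigV4-signed:
    results = search_service.search(q='star wars', size=10)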
+# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch Domain service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return get_regions('cloudsearchdomain', + connection_cls=CloudSearchDomainConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/cloudsearchdomain/layer1.py python-boto-2.38.0/boto/cloudsearchdomain/layer1.py --- python-boto-2.34.0/boto/cloudsearchdomain/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/cloudsearchdomain/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,540 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.cloudsearchdomain import exceptions + + +class CloudSearchDomainConnection(AWSAuthConnection): + """ + You use the AmazonCloudSearch2013 API to upload documents to a + search domain and search those documents. + + The endpoints for submitting `UploadDocuments`, `Search`, and + `Suggest` requests are domain-specific. To get the endpoints for + your domain, use the Amazon CloudSearch configuration service + `DescribeDomains` action. The domain endpoints are also displayed + on the domain dashboard in the Amazon CloudSearch console. You + submit suggest requests to the search endpoint. + + For more information, see the `Amazon CloudSearch Developer + Guide`_. 
+ """ + APIVersion = "2013-01-01" + AuthServiceName = 'cloudsearch' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "SearchException": exceptions.SearchException, + "DocumentServiceException": exceptions.DocumentServiceException, + } + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + if kwargs.get('host', None) is None: + raise ValueError( + 'The argument, host, must be provided when creating a ' + 'CloudSearchDomainConnection because its methods require the ' + 'specific domain\'s endpoint in order to successfully make ' + 'requests to that CloudSearch Domain.' + ) + super(CloudSearchDomainConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def search(self, query, cursor=None, expr=None, facet=None, + filter_query=None, highlight=None, partial=None, + query_options=None, query_parser=None, ret=None, size=None, + sort=None, start=None): + """ + Retrieves a list of documents that match the specified search + criteria. How you specify the search criteria depends on which + query parser you use. Amazon CloudSearch supports four query + parsers: + + + + `simple`: search all `text` and `text-array` fields for the + specified string. Search for phrases, individual terms, and + prefixes. + + `structured`: search specific fields, construct compound + queries using Boolean operators, and use advanced features + such as term boosting and proximity searching. + + `lucene`: specify search criteria using the Apache Lucene + query parser syntax. + + `dismax`: specify search criteria using the simplified + subset of the Apache Lucene query parser syntax defined by the + DisMax query parser. + + + For more information, see `Searching Your Data`_ in the Amazon + CloudSearch Developer Guide . + + The endpoint for submitting `Search` requests is domain- + specific. You submit search requests to a domain's search + endpoint. To get the search endpoint for your domain, use the + Amazon CloudSearch configuration service `DescribeDomains` + action. A domain's endpoints are also displayed on the domain + dashboard in the Amazon CloudSearch console. + + :type cursor: string + :param cursor: Retrieves a cursor value you can use to page through + large result sets. Use the `size` parameter to control the number + of hits to include in each response. You can specify either the + `cursor` or `start` parameter in a request; they are mutually + exclusive. To get the first cursor, set the cursor value to + `initial`. In subsequent requests, specify the cursor value + returned in the hits section of the response. + For more information, see `Paginating Results`_ in the Amazon + CloudSearch Developer Guide . + + :type expr: string + :param expr: Defines one or more numeric expressions that can be used + to sort results or specify search or filter criteria. You can also + specify expressions as return fields. + For more information about defining and using expressions, see + `Configuring Expressions`_ in the Amazon CloudSearch Developer + Guide . + + :type facet: string + :param facet: Specifies one or more fields for which to get facet + information, and options that control how the facet information is + returned. Each specified field must be facet-enabled in the domain + configuration. 
The fields and options are specified in JSON using + the form `{"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`. + You can specify the following faceting options: + + + + `buckets` specifies an array of the facet values or ranges to count. + Ranges are specified using the same syntax that you use to search + for a range of values. For more information, see `Searching for a + Range of Values`_ in the Amazon CloudSearch Developer Guide . + Buckets are returned in the order they are specified in the + request. The `sort` and `size` options are not valid if you specify + `buckets`. + `size` specifies the maximum number of facets to include in the + results. By default, Amazon CloudSearch returns counts for the top + 10. The `size` parameter is only valid when you specify the `sort` + option; it cannot be used in conjunction with `buckets`. + `sort` specifies how you want to sort the facets in the results: + `bucket` or `count`. Specify `bucket` to sort alphabetically or + numerically by facet value (in ascending order). Specify `count` to + sort by the facet counts computed for each facet value (in + descending order). To retrieve facet counts for particular values + or ranges of values, use the `buckets` option instead of `sort`. + + + If no facet options are specified, facet counts are computed for all + field values, the facets are sorted by facet count, and the top 10 + facets are returned in the results. + + For more information, see `Getting and Using Facet Information`_ in the + Amazon CloudSearch Developer Guide . + + :type filter_query: string + :param filter_query: Specifies a structured query that filters the + results of a search without affecting how the results are scored + and sorted. You use `filterQuery` in conjunction with the `query` + parameter to filter the documents that match the constraints + specified in the `query` parameter. Specifying a filter controls + only which matching documents are included in the results; it has + no effect on how they are scored and sorted. The `filterQuery` + parameter supports the full structured query syntax. + For more information about using filters, see `Filtering Matching + Documents`_ in the Amazon CloudSearch Developer Guide . + + :type highlight: string + :param highlight: Retrieves highlights for matches in the specified + `text` or `text-array` fields. Each specified field must be + highlight enabled in the domain configuration. The fields and + options are specified in JSON using the form `{"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`. + You can specify the following highlight options: + + + + `format`: specifies the format of the data in the text field: `text` + or `html`. When data is returned as HTML, all non-alphanumeric + characters are encoded. The default is `html`. + `max_phrases`: specifies the maximum number of occurrences of the + search term(s) you want to highlight. By default, the first + occurrence is highlighted. + `pre_tag`: specifies the string to prepend to an occurrence of a + search term. The default for HTML highlights is `<em>`. The + default for text highlights is `*`. + `post_tag`: specifies the string to append to an occurrence of a + search term. The default for HTML highlights is `</em>`. The + default for text highlights is `*`. + + + If no highlight options are specified for a field, the returned field + text is treated as HTML and the first match is highlighted with + emphasis tags: `<em>search-term</em>`.
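As a concrete illustration of the JSON forms described above, a facet/highlight request might look like the following sketch ('genre' and 'plot' are invented field names, and conn is assumed to be a CloudSearchDomainConnection):

    # Options are passed as JSON-encoded strings, one key per field.
    from boto.compat import json

    facet = json.dumps({'genre': {'sort': 'count', 'size': 5}})
    highlight = json.dumps({'plot': {'format': 'text',
                                     'pre_tag': '*', 'post_tag': '*'}})
    results = conn.search(query='star wars', query_parser='simple',
                          facet=facet, highlight=highlight)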
+ + :type partial: boolean + :param partial: Enables partial results to be returned if one or more + index partitions are unavailable. When your search index is + partitioned across multiple search instances, by default Amazon + CloudSearch only returns results if every partition can be queried. + This means that the failure of a single search instance can result + in 5xx (internal server) errors. When you enable partial results, + Amazon CloudSearch returns whatever results are available and + includes the percentage of documents searched in the search results + (percent-searched). This enables you to more gracefully degrade + your users' search experience. For example, rather than displaying + no results, you could display the partial results and a message + indicating that the results might be incomplete due to a temporary + system outage. + + :type query: string + :param query: Specifies the search criteria for the request. How you + specify the search criteria depends on the query parser used for + the request and the parser options specified in the `queryOptions` + parameter. By default, the `simple` query parser is used to process + requests. To use the `structured`, `lucene`, or `dismax` query + parser, you must also specify the `queryParser` parameter. + For more information about specifying search criteria, see `Searching + Your Data`_ in the Amazon CloudSearch Developer Guide . + + :type query_options: string + :param query_options: + Configures options for the query parser specified in the `queryParser` + parameter. + + The options you can configure vary according to which parser you use: + + + + `defaultOperator`: The default operator used to combine individual + terms in the search string. For example: `defaultOperator: 'or'`. + For the `dismax` parser, you specify a percentage that represents + the percentage of terms in the search string (rounded down) that + must match, rather than a default operator. A value of `0%` is the + equivalent to OR, and a value of `100%` is equivalent to AND. The + percentage must be specified as a value in the range 0-100 followed + by the percent (%) symbol. For example, `defaultOperator: 50%`. + Valid values: `and`, `or`, a percentage in the range 0%-100% ( + `dismax`). Default: `and` ( `simple`, `structured`, `lucene`) or + `100` ( `dismax`). Valid for: `simple`, `structured`, `lucene`, and + `dismax`. + + `fields`: An array of the fields to search when no fields are + specified in a search. If no fields are specified in a search and + this option is not specified, all text and text-array fields are + searched. You can specify a weight for each field to control the + relative importance of each field when Amazon CloudSearch + calculates relevance scores. To specify a field weight, append a + caret ( `^`) symbol and the weight to the field name. For example, + to boost the importance of the `title` field over the `description` + field you could specify: `"fields":["title^5","description"]`. + Valid values: The name of any configured field and an optional + numeric value greater than zero. Default: All `text` and `text- + array` fields. Valid for: `simple`, `structured`, `lucene`, and + `dismax`. + + `operators`: An array of the operators or special characters you want + to disable for the simple query parser. If you disable the `and`, + `or`, or `not` operators, the corresponding operators ( `+`, `|`, + `-`) have no special meaning and are dropped from the search + string. 
Similarly, disabling `prefix` disables the wildcard + operator ( `*`) and disabling `phrase` disables the ability to + search for phrases by enclosing phrases in double quotes. Disabling + precedence disables the ability to control order of precedence + using parentheses. Disabling `near` disables the ability to use the + ~ operator to perform a sloppy phrase search. Disabling the `fuzzy` + operator disables the ability to use the ~ operator to perform a + fuzzy search. `escape` disables the ability to use a backslash ( + `\`) to escape special characters within the search string. + Disabling whitespace is an advanced option that prevents the parser + from tokenizing on whitespace, which can be useful for Vietnamese. + (It prevents Vietnamese words from being split incorrectly.) For + example, you could disable all operators other than the phrase + operator to support just simple term and phrase queries: + `"operators":["and","not","or", "prefix"]`. Valid values: `and`, + `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`, + `prefix`, `whitespace`. Default: All operators and special + characters are enabled. Valid for: `simple`. + + `phraseFields`: An array of the `text` or `text-array` fields you + want to use for phrase searches. When the terms in the search + string appear in close proximity within a field, the field scores + higher. You can specify a weight for each field to boost that + score. The `phraseSlop` option controls how much the matches can + deviate from the search string and still be boosted. To specify a + field weight, append a caret ( `^`) symbol and the weight to the + field name. For example, to boost phrase matches in the `title` + field over the `abstract` field, you could specify: + `"phraseFields":["title^3", "plot"]` Valid values: The name of any + `text` or `text-array` field and an optional numeric value greater + than zero. Default: No fields. If you don't specify any fields with + `phraseFields`, proximity scoring is disabled even if `phraseSlop` + is specified. Valid for: `dismax`. + + `phraseSlop`: An integer value that specifies how much matches can + deviate from the search phrase and still be boosted according to + the weights specified in the `phraseFields` option; for example, + `phraseSlop: 2`. You must also specify `phraseFields` to enable + proximity scoring. Valid values: positive integers. Default: 0. + Valid for: `dismax`. + + `explicitPhraseSlop`: An integer value that specifies how much a + match can deviate from the search phrase when the phrase is + enclosed in double quotes in the search string. (Phrases that + exceed this proximity distance are not considered a match.) For + example, to specify a slop of three for dismax phrase queries, you + would specify `"explicitPhraseSlop":3`. Valid values: positive + integers. Default: 0. Valid for: `dismax`. + + `tieBreaker`: When a term in the search string is found in a + document's field, a score is calculated for that field based on how + common the word is in that field compared to other documents. If + the term occurs in multiple fields within a document, by default + only the highest scoring field contributes to the document's + overall score. You can specify a `tieBreaker` value to enable the + matches in lower-scoring fields to contribute to the document's + score. That way, if two documents have the same max field score for + a particular term, the score for the document that has matches in + more fields will be higher. 
The formula for calculating the score + with a tieBreaker is `(max field score) + (tieBreaker) * (sum of + the scores for the rest of the matching fields)`. Set `tieBreaker` + to 0 to disregard all but the highest scoring field (pure max): + `"tieBreaker":0`. Set to 1 to sum the scores from all fields (pure + sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0. + Valid for: `dismax`. + + :type query_parser: string + :param query_parser: + Specifies which query parser to use to process the request. If + `queryParser` is not specified, Amazon CloudSearch uses the + `simple` query parser. + + Amazon CloudSearch supports four query parsers: + + + + `simple`: perform simple searches of `text` and `text-array` fields. + By default, the `simple` query parser searches all `text` and + `text-array` fields. You can specify which fields to search by with + the `queryOptions` parameter. If you prefix a search term with a + plus sign (+) documents must contain the term to be considered a + match. (This is the default, unless you configure the default + operator with the `queryOptions` parameter.) You can use the `-` + (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular + terms, find results that match any of the specified terms, or + search for a prefix. To search for a phrase rather than individual + terms, enclose the phrase in double quotes. For more information, + see `Searching for Text`_ in the Amazon CloudSearch Developer Guide + . + + `structured`: perform advanced searches by combining multiple + expressions to define the search criteria. You can also search + within particular fields, search for values and ranges of values, + and use advanced options such as term boosting, `matchall`, and + `near`. For more information, see `Constructing Compound Queries`_ + in the Amazon CloudSearch Developer Guide . + + `lucene`: search using the Apache Lucene query parser syntax. For + more information, see `Apache Lucene Query Parser Syntax`_. + + `dismax`: search using the simplified subset of the Apache Lucene + query parser syntax defined by the DisMax query parser. For more + information, see `DisMax Query Parser Syntax`_. + + :type ret: string + :param ret: Specifies the field and expression values to include in + the response. Multiple fields or expressions are specified as a + comma-separated list. By default, a search response includes all + return enabled fields ( `_all_fields`). To return only the document + IDs for the matching documents, specify `_no_fields`. To retrieve + the relevance score calculated for each document, specify `_score`. + + :type size: long + :param size: Specifies the maximum number of search hits to include in + the response. + + :type sort: string + :param sort: Specifies the fields or custom expressions to use to sort + the search results. Multiple fields or expressions are specified as + a comma-separated list. You must specify the sort direction ( `asc` + or `desc`) for each field; for example, `year desc,title asc`. To + use a field to sort results, the field must be sort-enabled in the + domain configuration. Array type fields cannot be used for sorting. + If no `sort` parameter is specified, results are sorted by their + default relevance scores in descending order: `_score desc`. You + can also sort by document ID ( `_id asc`) and version ( `_version + desc`). + For more information, see `Sorting Results`_ in the Amazon CloudSearch + Developer Guide . 
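Pulling the queryOptions and sort forms above together, a boosted, sorted query might be sketched as follows ('title', 'description', and 'year' are hypothetical index fields; conn is the CloudSearchDomainConnection assumed earlier):

    # Boost 'title' over 'description' and sort by year, newest first.
    results = conn.search(
        query='star wars',
        query_parser='simple',
        query_options='{"fields":["title^5","description"]}',
        sort='year desc,title asc',
        ret='_score,title',
        size=20)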
+ + :type start: long + :param start: Specifies the offset of the first search hit you want to + return. Note that the result set is zero-based; the first result is + at index 0. You can specify either the `start` or `cursor` + parameter in a request; they are mutually exclusive. + For more information, see `Paginating Results`_ in the Amazon + CloudSearch Developer Guide . + + """ + uri = '/2013-01-01/search' + params = {} + headers = {} + query_params = {} + if cursor is not None: + query_params['cursor'] = cursor + if expr is not None: + query_params['expr'] = expr + if facet is not None: + query_params['facet'] = facet + if filter_query is not None: + query_params['fq'] = filter_query + if highlight is not None: + query_params['highlight'] = highlight + if partial is not None: + query_params['partial'] = partial + if query is not None: + query_params['q'] = query + if query_options is not None: + query_params['q.options'] = query_options + if query_parser is not None: + query_params['q.parser'] = query_parser + if ret is not None: + query_params['return'] = ret + if size is not None: + query_params['size'] = size + if sort is not None: + query_params['sort'] = sort + if start is not None: + query_params['start'] = start + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def suggest(self, query, suggester, size=None): + """ + Retrieves autocomplete suggestions for a partial query string. + You can use suggestions to display likely matches + before users finish typing. In Amazon CloudSearch, suggestions + are based on the contents of a particular text field. When you + request suggestions, Amazon CloudSearch finds all of the + documents whose values in the suggester field start with the + specified query string. The beginning of the field must match + the query string to be considered a match. + + For more information about configuring suggesters and + retrieving suggestions, see `Getting Suggestions`_ in the + Amazon CloudSearch Developer Guide . + + The endpoint for submitting `Suggest` requests is domain- + specific. You submit suggest requests to a domain's search + endpoint. To get the search endpoint for your domain, use the + Amazon CloudSearch configuration service `DescribeDomains` + action. A domain's endpoints are also displayed on the domain + dashboard in the Amazon CloudSearch console. + + :type query: string + :param query: Specifies the string for which you want to get + suggestions. + + :type suggester: string + :param suggester: Specifies the name of the suggester to use to find + suggested matches. + + :type size: long + :param size: Specifies the maximum number of suggestions to return. + + """ + uri = '/2013-01-01/suggest' + params = {} + headers = {} + query_params = {} + if query is not None: + query_params['q'] = query + if suggester is not None: + query_params['suggester'] = suggester + if size is not None: + query_params['size'] = size + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def upload_documents(self, documents, content_type): + """ + Posts a batch of documents to a search domain for indexing. A + document batch is a collection of add and delete operations + that represent the documents you want to add, update, or + delete from your domain. Batches can be described in either + JSON or XML.
Each item that you want Amazon CloudSearch to + return as a search result (such as a product) is represented + as a document. Every document has a unique ID and one or more + fields that contain the data that you want to search and + return in results. Individual documents cannot contain more + than 1 MB of data. The entire batch cannot exceed 5 MB. To get + the best possible upload performance, group add and delete + operations in batches that are close to the 5 MB limit. + Submitting a large volume of single-document batches can + overload a domain's document service. + + The endpoint for submitting `UploadDocuments` requests is + domain-specific. To get the document endpoint for your domain, + use the Amazon CloudSearch configuration service + `DescribeDomains` action. A domain's endpoints are also + displayed on the domain dashboard in the Amazon CloudSearch + console. + + For more information about formatting your data for Amazon + CloudSearch, see `Preparing Your Data`_ in the Amazon + CloudSearch Developer Guide . For more information about + uploading data for indexing, see `Uploading Data`_ in the + Amazon CloudSearch Developer Guide . + + :type documents: blob + :param documents: A batch of documents formatted in JSON or XML. + + :type content_type: string + :param content_type: + The format of the batch you are uploading. Amazon CloudSearch supports + two document batch formats: + + + + application/json + + application/xml + + """ + uri = '/2013-01-01/documents/batch' + headers = {} + query_params = {} + if content_type is not None: + headers['Content-Type'] = content_type + return self.make_request('POST', uri, expected_status=200, + data=documents, headers=headers, + params=query_params) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = AWSAuthConnection.make_request( + self, verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + raise JSONResponseError(response.status, response.reason, body) diff -Nru python-boto-2.34.0/boto/cloudtrail/exceptions.py python-boto-2.38.0/boto/cloudtrail/exceptions.py --- python-boto-2.34.0/boto/cloudtrail/exceptions.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cloudtrail/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -88,3 +88,31 @@ write files into the prefix. """ pass + + +class InvalidMaxResultsException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class InvalidLookupAttributesException(BotoServerError): + pass + + +class InvalidCloudWatchLogsLogGroupArnException(BotoServerError): + pass + + +class InvalidCloudWatchLogsRoleArnException(BotoServerError): + pass + + +class CloudWatchLogsDeliveryUnavailableException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/cloudtrail/layer1.py python-boto-2.38.0/boto/cloudtrail/layer1.py --- python-boto-2.34.0/boto/cloudtrail/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cloudtrail/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -30,7 +30,7 @@ class CloudTrailConnection(AWSQueryConnection): """ - AWS Cloud Trail + AWS CloudTrail This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail. @@ -62,26 +62,33 @@ ResponseError = JSONResponseError _faults = { + "InvalidMaxResultsException": exceptions.InvalidMaxResultsException, "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException, "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException, "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException, "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException, + "InvalidCloudWatchLogsLogGroupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException, + "InvalidCloudWatchLogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException, "InvalidTrailNameException": exceptions.InvalidTrailNameException, - "TrailNotProvidedException": exceptions.TrailNotProvidedException, + "CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException, "TrailNotFoundException": exceptions.TrailNotFoundException, "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, "InvalidS3PrefixException": exceptions.InvalidS3PrefixException, "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException, "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException, } + def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) - if 'host' not in kwargs: + if 'host' not in kwargs or kwargs['host'] is None: kwargs['host'] = region.endpoint super(CloudTrailConnection, self).__init__(**kwargs) @@ -90,21 +97,16 @@ def _required_auth_capability(self): return ['hmac-v4'] - def create_trail(self, name=None, s3_bucket_name=None, - s3_key_prefix=None, sns_topic_name=None, - include_global_service_events=None, trail=None): + def create_trail(self, name, s3_bucket_name, s3_key_prefix=None, + sns_topic_name=None, include_global_service_events=None, + cloud_watch_logs_log_group_arn=None, + cloud_watch_logs_role_arn=None): """ From the command line, use `create-subscription`. Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. - Support for passing Trail as a parameter ends as early as - February 25, 2014. The request and response examples in this - topic show the use of parameters as well as a Trail object. - Until Trail is removed, you can use either Trail or the - parameter list. - :type name: string :param name: Specifies the name of the trail. @@ -125,26 +127,28 @@ publishing events from global services such as IAM to the log files. - :type trail: dict - :param trail: Support for passing a Trail object in the CreateTrail or - UpdateTrail actions will end as early as February 15, 2014. Instead - of the Trail object and its members, use the parameters listed for - these actions. 
+ :type cloud_watch_logs_log_group_arn: string + :param cloud_watch_logs_log_group_arn: Specifies a log group name using + an Amazon Resource Name (ARN), a unique identifier that represents + the log group to which CloudTrail logs will be delivered. Not + required unless you specify CloudWatchLogsRoleArn. + + :type cloud_watch_logs_role_arn: string + :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch + Logs endpoint to assume to write to a user's log group. """ - params = {} - if name is not None: - params['Name'] = name - if s3_bucket_name is not None: - params['S3BucketName'] = s3_bucket_name + params = {'Name': name, 'S3BucketName': s3_bucket_name, } if s3_key_prefix is not None: params['S3KeyPrefix'] = s3_key_prefix if sns_topic_name is not None: params['SnsTopicName'] = sns_topic_name if include_global_service_events is not None: params['IncludeGlobalServiceEvents'] = include_global_service_events - if trail is not None: - params['trail'] = trail + if cloud_watch_logs_log_group_arn is not None: + params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn + if cloud_watch_logs_role_arn is not None: + params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn return self.make_request(action='CreateTrail', body=json.dumps(params)) @@ -162,11 +166,11 @@ def describe_trails(self, trail_name_list=None): """ - Retrieves the settings for some or all trails associated with - an account. + Retrieves settings for the trail associated with the current + region for your account. :type trail_name_list: list - :param trail_name_list: The list of trails. + :param trail_name_list: The trail returned. """ params = {} @@ -182,49 +186,6 @@ errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. - The CloudTrail API is currently undergoing revision. This - action currently returns both new fields and fields slated for - removal from the API. The following lists indicate the plans - for each field: - - **List of Members Planned for Ongoing Support** - - - + IsLogging - + LatestDeliveryTime - + LatestNotificationTime - + StartLoggingTime - + StopLoggingTime - + LatestNotificationError - + LatestDeliveryError - - - **List of Members Scheduled for Removal** - - - + **LatestDeliveryAttemptTime**: Use LatestDeliveryTime - instead. - + **LatestNotificationAttemptTime**: Use - LatestNotificationTime instead. - + **LatestDeliveryAttemptSucceeded**: No replacement. See the - note following this list. - + **LatestNotificationAttemptSucceeded**: No replacement. See - the note following this list. - + **TimeLoggingStarted**: Use StartLoggingTime instead. - + **TimeLoggingStopped**: Use StopLoggingtime instead. - - - No replacements have been created for - LatestDeliveryAttemptSucceeded and - LatestNotificationAttemptSucceeded . Use LatestDeliveryError - and LatestNotificationError to evaluate success or failure of - log delivery or notification. Empty values returned for these - fields indicate success. An error in LatestDeliveryError - generally indicates either a missing bucket or insufficient - permissions to write to the bucket. Similarly, an error in - LatestNotificationError indicates either a missing topic or - insufficient permissions. - :type name: string :param name: The name of the trail for which you are requesting the current status.
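A short sketch of the new CloudWatch Logs delivery parameters in use (bucket name and ARNs are placeholders):

    from boto.cloudtrail.layer1 import CloudTrailConnection

    conn = CloudTrailConnection()
    conn.create_trail(
        name='my-trail',
        s3_bucket_name='my-cloudtrail-bucket',
        cloud_watch_logs_log_group_arn='arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail:*',
        cloud_watch_logs_role_arn='arn:aws:iam::123456789012:role/CloudTrail_CWLogs')
    conn.start_logging('my-trail')  # begin recording API calls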
@@ -234,6 +195,68 @@ return self.make_request(action='GetTrailStatus', body=json.dumps(params)) + def lookup_events(self, lookup_attributes=None, start_time=None, + end_time=None, max_results=None, next_token=None): + """ + Looks up API activity events captured by CloudTrail that + create, update, or delete resources in your account. Events + for a region can be looked up for the times in which you had + CloudTrail turned on in that region during the last seven + days. Lookup supports five different attributes: time range + (defined by a start time and end time), user name, event name, + resource type, and resource name. All attributes are optional. + At most the time range and one other attribute can be specified + in any one lookup request. The + default number of results returned is 10, with a maximum of 50 + possible. The response includes a token that you can use to + get the next page of results. + The rate of lookup requests is limited to one per second per + account. If this limit is exceeded, a throttling error occurs. + Events that occurred during the selected time range will not + be available for lookup if CloudTrail logging was not enabled + when the events occurred. + + :type lookup_attributes: list + :param lookup_attributes: Contains a list of lookup attributes. + Currently the list can contain only one item. + + :type start_time: timestamp + :param start_time: Specifies that only events that occur after or at + the specified time are returned. If the specified start time is + after the specified end time, an error is returned. + + :type end_time: timestamp + :param end_time: Specifies that only events that occur before or at the + specified time are returned. If the specified end time is before + the specified start time, an error is returned. + + :type max_results: integer + :param max_results: The number of events to return. Possible values are + 1 through 50. The default is 10. + + :type next_token: string + :param next_token: The token to use to get the next page of results + after a previous API call. This token must be passed in with the + same parameters that were specified in the original call. For + example, if the original call specified an AttributeKey of + 'Username' with a value of 'root', the call with NextToken should + include those same parameters. + + """ + params = {} + if lookup_attributes is not None: + params['LookupAttributes'] = lookup_attributes + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if max_results is not None: + params['MaxResults'] = max_results + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='LookupEvents', + body=json.dumps(params)) + def start_logging(self, name): """ Starts the recording of AWS API calls and log file delivery @@ -265,9 +288,10 @@ return self.make_request(action='StopLogging', body=json.dumps(params)) - def update_trail(self, name=None, s3_bucket_name=None, - s3_key_prefix=None, sns_topic_name=None, - include_global_service_events=None, trail=None): + def update_trail(self, name, s3_bucket_name=None, s3_key_prefix=None, + sns_topic_name=None, include_global_service_events=None, + cloud_watch_logs_log_group_arn=None, + cloud_watch_logs_role_arn=None): """ From the command line, use `update-subscription`. @@ -278,12 +302,6 @@ target for CloudTrail log files, an IAM policy exists for the bucket.
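The new lookup_events call above pages through recent API activity. A sketch, reusing the conn from the previous example (attribute values are illustrative; the 'Events' and 'NextToken' response keys follow the LookupEvents API):

    events = conn.lookup_events(
        lookup_attributes=[{'AttributeKey': 'Username',
                            'AttributeValue': 'root'}],
        max_results=50)
    for event in events['Events']:
        print(event['EventName'], event['EventId'])
    # events.get('NextToken'), when present, is passed back (with the
    # same parameters) to fetch the next page.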
- Support for passing Trail as a parameter ends as early as - February 25, 2014. The request and response examples in this - topic show the use of parameters as well as a Trail object. - Until Trail is removed, you can use either Trail or the - parameter list. - :type name: string :param name: Specifies the name of the trail. @@ -304,16 +322,18 @@ publishing events from global services such as IAM to the log files. - :type trail: dict - :param trail: Support for passing a Trail object in the CreateTrail or - UpdateTrail actions will end as early as February 15, 2014. Instead - of the Trail object and its members, use the parameters listed for - these actions. + :type cloud_watch_logs_log_group_arn: string + :param cloud_watch_logs_log_group_arn: Specifies a log group name using + an Amazon Resource Name (ARN), a unique identifier that represents + the log group to which CloudTrail logs will be delivered. Not + required unless you specify CloudWatchLogsRoleArn. + + :type cloud_watch_logs_role_arn: string + :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch + Logs endpoint to assume to write to a user's log group. """ - params = {} - if name is not None: - params['Name'] = name + params = {'Name': name, } if s3_bucket_name is not None: params['S3BucketName'] = s3_bucket_name if s3_key_prefix is not None: @@ -322,8 +342,10 @@ params['SnsTopicName'] = sns_topic_name if include_global_service_events is not None: params['IncludeGlobalServiceEvents'] = include_global_service_events - if trail is not None: - params['trail'] = trail + if cloud_watch_logs_log_group_arn is not None: + params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn + if cloud_watch_logs_role_arn is not None: + params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn return self.make_request(action='UpdateTrail', body=json.dumps(params)) diff -Nru python-boto-2.34.0/boto/codedeploy/exceptions.py python-boto-2.38.0/boto/codedeploy/exceptions.py --- python-boto-2.34.0/boto/codedeploy/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/codedeploy/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,199 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
+# + +from boto.exception import BotoServerError + + +class InvalidDeploymentIdException(BotoServerError): + pass + + +class InvalidDeploymentGroupNameException(BotoServerError): + pass + + +class DeploymentConfigAlreadyExistsException(BotoServerError): + pass + + +class InvalidRoleException(BotoServerError): + pass + + +class RoleRequiredException(BotoServerError): + pass + + +class DeploymentGroupAlreadyExistsException(BotoServerError): + pass + + +class DeploymentConfigLimitExceededException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass + + +class InvalidDeploymentConfigNameException(BotoServerError): + pass + + +class InvalidSortByException(BotoServerError): + pass + + +class InstanceDoesNotExistException(BotoServerError): + pass + + +class InvalidMinimumHealthyHostValueException(BotoServerError): + pass + + +class ApplicationLimitExceededException(BotoServerError): + pass + + +class ApplicationNameRequiredException(BotoServerError): + pass + + +class InvalidEC2TagException(BotoServerError): + pass + + +class DeploymentDoesNotExistException(BotoServerError): + pass + + +class DeploymentLimitExceededException(BotoServerError): + pass + + +class InvalidInstanceStatusException(BotoServerError): + pass + + +class RevisionRequiredException(BotoServerError): + pass + + +class InvalidBucketNameFilterException(BotoServerError): + pass + + +class DeploymentGroupLimitExceededException(BotoServerError): + pass + + +class DeploymentGroupDoesNotExistException(BotoServerError): + pass + + +class DeploymentConfigNameRequiredException(BotoServerError): + pass + + +class DeploymentAlreadyCompletedException(BotoServerError): + pass + + +class RevisionDoesNotExistException(BotoServerError): + pass + + +class DeploymentGroupNameRequiredException(BotoServerError): + pass + + +class DeploymentIdRequiredException(BotoServerError): + pass + + +class DeploymentConfigDoesNotExistException(BotoServerError): + pass + + +class BucketNameFilterRequiredException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class ApplicationDoesNotExistException(BotoServerError): + pass + + +class InvalidRevisionException(BotoServerError): + pass + + +class InvalidSortOrderException(BotoServerError): + pass + + +class InvalidOperationException(BotoServerError): + pass + + +class InvalidAutoScalingGroupException(BotoServerError): + pass + + +class InvalidApplicationNameException(BotoServerError): + pass + + +class DescriptionTooLongException(BotoServerError): + pass + + +class ApplicationAlreadyExistsException(BotoServerError): + pass + + +class InvalidDeployedStateFilterException(BotoServerError): + pass + + +class DeploymentNotStartedException(BotoServerError): + pass + + +class DeploymentConfigInUseException(BotoServerError): + pass + + +class InstanceIdRequiredException(BotoServerError): + pass + + +class InvalidKeyPrefixFilterException(BotoServerError): + pass + + +class InvalidDeploymentStatusException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/codedeploy/__init__.py python-boto-2.38.0/boto/codedeploy/__init__.py --- python-boto-2.34.0/boto/codedeploy/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/codedeploy/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS CodeDeploy service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return get_regions('codedeploy', connection_cls=CodeDeployConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/codedeploy/layer1.py python-boto-2.38.0/boto/codedeploy/layer1.py --- python-boto-2.34.0/boto/codedeploy/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/codedeploy/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,899 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.codedeploy import exceptions + + +class CodeDeployConnection(AWSQueryConnection): + """ + AWS CodeDeploy **Overview** + This is the AWS CodeDeploy API Reference. This guide provides + descriptions of the AWS CodeDeploy APIs. For additional + information, see the `AWS CodeDeploy User Guide`_. 
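The module-level helpers above mirror the other boto services. A sketch of connecting and listing applications (the region name is only an example, and list_applications is the layer1 wrapper for the ListApplications action):

    import boto.codedeploy

    conn = boto.codedeploy.connect_to_region('us-east-1')
    apps = conn.list_applications()  # JSON dict of application names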
+ **Using the APIs** + You can use the AWS CodeDeploy APIs to work with the following + items: + + + + Applications , which are unique identifiers that AWS CodeDeploy + uses to ensure that the correct combinations of revisions, + deployment configurations, and deployment groups are being + referenced during deployments. You can work with applications by + calling CreateApplication, DeleteApplication, GetApplication, + ListApplications, BatchGetApplications, and UpdateApplication to + create, delete, and get information about applications, and to + change information about an application, respectively. + + Deployment configurations , which are sets of deployment rules + and deployment success and failure conditions that AWS CodeDeploy + uses during deployments. You can work with deployment + configurations by calling CreateDeploymentConfig, + DeleteDeploymentConfig, GetDeploymentConfig, and + ListDeploymentConfigs to create, delete, and get information about + deployment configurations, respectively. + + Deployment groups , which represent groups of Amazon EC2 + instances to which application revisions can be deployed. You can + work with deployment groups by calling CreateDeploymentGroup, + DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups, + and UpdateDeploymentGroup to create, delete, and get information + about single and multiple deployment groups, and to change + information about a deployment group, respectively. + + Deployment instances (also known simply as instances ), which + represent Amazon EC2 instances to which application revisions are + deployed. Deployment instances are identified by their Amazon EC2 + tags or Auto Scaling group names. Deployment instances belong to + deployment groups. You can work with deployment instances by + calling GetDeploymentInstance and ListDeploymentInstances to get + information about single and multiple deployment instances, + respectively. + + Deployments , which represent the process of deploying revisions + to deployment groups. You can work with deployments by calling + CreateDeployment, GetDeployment, ListDeployments, + BatchGetDeployments, and StopDeployment to create and get + information about deployments, and to stop a deployment, + respectively. + + Application revisions (also known simply as revisions ), which + are archive files that are stored in Amazon S3 buckets or GitHub + repositories. These revisions contain source content (such as + source code, web pages, executable files, any deployment scripts, + and similar) along with an Application Specification file (AppSpec + file). (The AppSpec file is unique to AWS CodeDeploy; it defines a + series of deployment actions that you want AWS CodeDeploy to + execute.) An application revision is uniquely identified by its + Amazon S3 object key and its ETag, version, or both. Application + revisions are deployed to deployment groups. You can work with + application revisions by calling GetApplicationRevision, + ListApplicationRevisions, and RegisterApplicationRevision to get + information about application revisions and to inform AWS + CodeDeploy about an application revision, respectively. 
+ """ + APIVersion = "2014-10-06" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com" + ServiceName = "codedeploy" + TargetPrefix = "CodeDeploy_20141006" + ResponseError = JSONResponseError + + _faults = { + "InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException, + "InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException, + "DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException, + "InvalidRoleException": exceptions.InvalidRoleException, + "RoleRequiredException": exceptions.RoleRequiredException, + "DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException, + "DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException, + "InvalidSortByException": exceptions.InvalidSortByException, + "InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException, + "InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException, + "ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException, + "ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException, + "InvalidEC2TagException": exceptions.InvalidEC2TagException, + "DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException, + "DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException, + "InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException, + "RevisionRequiredException": exceptions.RevisionRequiredException, + "InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException, + "DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException, + "DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException, + "DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException, + "DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException, + "RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException, + "DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException, + "DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException, + "DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException, + "BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException, + "InvalidRevisionException": exceptions.InvalidRevisionException, + "InvalidSortOrderException": exceptions.InvalidSortOrderException, + "InvalidOperationException": exceptions.InvalidOperationException, + "InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException, + "InvalidApplicationNameException": exceptions.InvalidApplicationNameException, + "DescriptionTooLongException": exceptions.DescriptionTooLongException, + "ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException, + "InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException, + "DeploymentNotStartedException": exceptions.DeploymentNotStartedException, + "DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException, 
+ "InstanceIdRequiredException": exceptions.InstanceIdRequiredException, + "InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException, + "InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CodeDeployConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def batch_get_applications(self, application_names=None): + """ + Gets information about one or more applications. + + :type application_names: list + :param application_names: A list of application names, with multiple + application names separated by spaces. + + """ + params = {} + if application_names is not None: + params['applicationNames'] = application_names + return self.make_request(action='BatchGetApplications', + body=json.dumps(params)) + + def batch_get_deployments(self, deployment_ids=None): + """ + Gets information about one or more deployments. + + :type deployment_ids: list + :param deployment_ids: A list of deployment IDs, with multiple + deployment IDs separated by spaces. + + """ + params = {} + if deployment_ids is not None: + params['deploymentIds'] = deployment_ids + return self.make_request(action='BatchGetDeployments', + body=json.dumps(params)) + + def create_application(self, application_name): + """ + Creates a new application. + + :type application_name: string + :param application_name: The name of the application. This name must be + unique within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='CreateApplication', + body=json.dumps(params)) + + def create_deployment(self, application_name, deployment_group_name=None, + revision=None, deployment_config_name=None, + description=None, + ignore_application_stop_failures=None): + """ + Deploys an application revision to the specified deployment + group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The deployment group's name. + + :type revision: dict + :param revision: The type of revision to deploy, along with information + about the revision's location. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + If not specified, the value configured in the deployment group will be + used as the default. If the deployment group does not have a + deployment configuration associated with it, then + CodeDeployDefault.OneAtATime will be used by default. + + :type description: string + :param description: A comment about the deployment. + + :type ignore_application_stop_failures: boolean + :param ignore_application_stop_failures: If set to true, then if the + deployment causes the ApplicationStop deployment lifecycle event to + fail to a specific instance, the deployment will not be considered + to have failed to that instance at that point and will continue on + to the BeforeInstall deployment lifecycle event. 
+ If set to false or not specified, then if the deployment causes the + ApplicationStop deployment lifecycle event to fail to a specific + instance, the deployment to that instance will stop, and the + deployment to that instance will be considered to have failed. + + """ + params = {'applicationName': application_name, } + if deployment_group_name is not None: + params['deploymentGroupName'] = deployment_group_name + if revision is not None: + params['revision'] = revision + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if description is not None: + params['description'] = description + if ignore_application_stop_failures is not None: + params['ignoreApplicationStopFailures'] = ignore_application_stop_failures + return self.make_request(action='CreateDeployment', + body=json.dumps(params)) + + def create_deployment_config(self, deployment_config_name, + minimum_healthy_hosts=None): + """ + Creates a new deployment configuration. + + :type deployment_config_name: string + :param deployment_config_name: The name of the deployment configuration + to create. + + :type minimum_healthy_hosts: dict + :param minimum_healthy_hosts: The minimum number of healthy instances + that should be available at any time during the deployment. There + are two parameters expected in the input: type and value. + The type parameter takes either of the following values: + + + + HOST_COUNT: The value parameter represents the minimum number of + healthy instances, as an absolute value. + + FLEET_PERCENT: The value parameter represents the minimum number of + healthy instances, as a percentage of the total number of instances + in the deployment. If you specify FLEET_PERCENT, then at the start + of the deployment AWS CodeDeploy converts the percentage to the + equivalent number of instances and rounds fractional instances up. + + + The value parameter takes an integer. + + For example, to set a minimum of 95% healthy instances, specify a type + of FLEET_PERCENT and a value of 95. + + """ + params = {'deploymentConfigName': deployment_config_name, } + if minimum_healthy_hosts is not None: + params['minimumHealthyHosts'] = minimum_healthy_hosts + return self.make_request(action='CreateDeploymentConfig', + body=json.dumps(params)) + + def create_deployment_group(self, application_name, + deployment_group_name, + deployment_config_name=None, + ec_2_tag_filters=None, + auto_scaling_groups=None, + service_role_arn=None): + """ + Creates a new deployment group for application revisions to be + deployed to. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + :type deployment_config_name: string + :param deployment_config_name: If specified, the deployment + configuration name must be one of the predefined values, or it can + be a custom deployment configuration: + + + CodeDeployDefault.AllAtOnce deploys an application revision to up to + all of the Amazon EC2 instances at once. The overall deployment + succeeds if the application revision deploys to at least one of the + instances. The overall deployment fails after the application + revision fails to deploy to all of the instances. For example, for + 9 instances, deploy to up to all 9 instances at once.
The overall + deployment succeeds if any of the 9 instances is successfully + deployed to, and it fails if all 9 instances fail to be deployed + to. + + CodeDeployDefault.HalfAtATime deploys to up to half of the instances + at a time (with fractions rounded down). The overall deployment + succeeds if the application revision deploys to at least half of + the instances (with fractions rounded up); otherwise, the + deployment fails. For example, for 9 instances, deploy to up to 4 + instances at a time. The overall deployment succeeds if 5 or more + instances are successfully deployed to; otherwise, the deployment + fails. Note that the deployment may successfully deploy to some + instances, even if the overall deployment fails. + + CodeDeployDefault.OneAtATime deploys the application revision to only + one of the instances at a time. The overall deployment succeeds if + the application revision deploys to all of the instances. The + overall deployment fails after the application revision first fails + to deploy to any one instance. For example, for 9 instances, deploy + to one instance at a time. The overall deployment succeeds if all 9 + instances are successfully deployed to, and it fails if any one + of the 9 instances fails to be deployed to. Note that the deployment + may successfully deploy to some instances, even if the overall + deployment fails. This is the default deployment configuration if a + configuration isn't specified for either the deployment or the + deployment group. + + + To create a custom deployment configuration, call the create deployment + configuration operation. + + :type ec_2_tag_filters: list + :param ec_2_tag_filters: The Amazon EC2 tags to filter on. + + :type auto_scaling_groups: list + :param auto_scaling_groups: A list of associated Auto Scaling groups. + + :type service_role_arn: string + :param service_role_arn: A service role ARN that allows AWS CodeDeploy + to act on the user's behalf when interacting with AWS services. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if ec_2_tag_filters is not None: + params['ec2TagFilters'] = ec_2_tag_filters + if auto_scaling_groups is not None: + params['autoScalingGroups'] = auto_scaling_groups + if service_role_arn is not None: + params['serviceRoleArn'] = service_role_arn + return self.make_request(action='CreateDeploymentGroup', + body=json.dumps(params)) + + def delete_application(self, application_name): + """ + Deletes an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='DeleteApplication', + body=json.dumps(params)) + + def delete_deployment_config(self, deployment_config_name): + """ + Deletes a deployment configuration. + + A deployment configuration cannot be deleted if it is + currently in use. Also, predefined configurations cannot be + deleted. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account.
+ + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='DeleteDeploymentConfig', + body=json.dumps(params)) + + def delete_deployment_group(self, application_name, + deployment_group_name): + """ + Deletes a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='DeleteDeploymentGroup', + body=json.dumps(params)) + + def get_application(self, application_name): + """ + Gets information about an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='GetApplication', + body=json.dumps(params)) + + def get_application_revision(self, application_name, revision): + """ + Gets information about an application revision. + + :type application_name: string + :param application_name: The name of the application that corresponds + to the revision. + + :type revision: dict + :param revision: Information about the application revision to get, + including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + return self.make_request(action='GetApplicationRevision', + body=json.dumps(params)) + + def get_deployment(self, deployment_id): + """ + Gets information about a deployment. + + :type deployment_id: string + :param deployment_id: An existing deployment ID within the AWS user + account. + + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='GetDeployment', + body=json.dumps(params)) + + def get_deployment_config(self, deployment_config_name): + """ + Gets information about a deployment configuration. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='GetDeploymentConfig', + body=json.dumps(params)) + + def get_deployment_group(self, application_name, deployment_group_name): + """ + Gets information about a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='GetDeploymentGroup', + body=json.dumps(params)) + + def get_deployment_instance(self, deployment_id, instance_id): + """ + Gets information about an Amazon EC2 instance as part of a + deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type instance_id: string + :param instance_id: The unique ID of an Amazon EC2 instance in the + deployment's deployment group. 
+ + """ + params = { + 'deploymentId': deployment_id, + 'instanceId': instance_id, + } + return self.make_request(action='GetDeploymentInstance', + body=json.dumps(params)) + + def list_application_revisions(self, application_name, sort_by=None, + sort_order=None, s_3_bucket=None, + s_3_key_prefix=None, deployed=None, + next_token=None): + """ + Lists information about revisions for an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type sort_by: string + :param sort_by: The column name to sort the list results by: + + + registerTime: Sort the list results by when the revisions were + registered with AWS CodeDeploy. + + firstUsedTime: Sort the list results by when the revisions were first + used by in a deployment. + + lastUsedTime: Sort the list results by when the revisions were last + used in a deployment. + + + If not specified or set to null, the results will be returned in an + arbitrary order. + + :type sort_order: string + :param sort_order: The order to sort the list results by: + + + ascending: Sort the list results in ascending order. + + descending: Sort the list results in descending order. + + + If not specified, the results will be sorted in ascending order. + + If set to null, the results will be sorted in an arbitrary order. + + :type s_3_bucket: string + :param s_3_bucket: A specific Amazon S3 bucket name to limit the search + for revisions. + If set to null, then all of the user's buckets will be searched. + + :type s_3_key_prefix: string + :param s_3_key_prefix: A specific key prefix for the set of Amazon S3 + objects to limit the search for revisions. + + :type deployed: string + :param deployed: + Whether to list revisions based on whether the revision is the target + revision of an deployment group: + + + + include: List revisions that are target revisions of a deployment + group. + + exclude: Do not list revisions that are target revisions of a + deployment group. + + ignore: List all revisions, regardless of whether they are target + revisions of a deployment group. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list application revisions call, which can be used to return the + next set of applications in the list. + + """ + params = {'applicationName': application_name, } + if sort_by is not None: + params['sortBy'] = sort_by + if sort_order is not None: + params['sortOrder'] = sort_order + if s_3_bucket is not None: + params['s3Bucket'] = s_3_bucket + if s_3_key_prefix is not None: + params['s3KeyPrefix'] = s_3_key_prefix + if deployed is not None: + params['deployed'] = deployed + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplicationRevisions', + body=json.dumps(params)) + + def list_applications(self, next_token=None): + """ + Lists the applications registered within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list applications call, which can be used to return the next set of + applications in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplications', + body=json.dumps(params)) + + def list_deployment_configs(self, next_token=None): + """ + Lists the deployment configurations within the AWS user + account. 
+ + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment configurations call, which can be used to return + the next set of deployment configurations in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentConfigs', + body=json.dumps(params)) + + def list_deployment_groups(self, application_name, next_token=None): + """ + Lists the deployment groups for an application registered + within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment groups call, which can be used to return the next + set of deployment groups in the list. + + """ + params = {'applicationName': application_name, } + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentGroups', + body=json.dumps(params)) + + def list_deployment_instances(self, deployment_id, next_token=None, + instance_status_filter=None): + """ + Lists the Amazon EC2 instances for a deployment within the AWS + user account. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment instances call, which can be used to return the + next set of deployment instances in the list. + + :type instance_status_filter: list + :param instance_status_filter: + A subset of instances to list, by status: + + + + Pending: Include in the resulting list those instances with pending + deployments. + + InProgress: Include in the resulting list those instances with in- + progress deployments. + + Succeeded: Include in the resulting list those instances with + succeeded deployments. + + Failed: Include in the resulting list those instances with failed + deployments. + + Skipped: Include in the resulting list those instances with skipped + deployments. + + Unknown: Include in the resulting list those instances with + deployments in an unknown state. + + """ + params = {'deploymentId': deployment_id, } + if next_token is not None: + params['nextToken'] = next_token + if instance_status_filter is not None: + params['instanceStatusFilter'] = instance_status_filter + return self.make_request(action='ListDeploymentInstances', + body=json.dumps(params)) + + def list_deployments(self, application_name=None, + deployment_group_name=None, + include_only_statuses=None, create_time_range=None, + next_token=None): + """ + Lists the deployments under a deployment group for an + application registered within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + :type include_only_statuses: list + :param include_only_statuses: A subset of deployments to list, by + status: + + + Created: Include in the resulting list created deployments. + + Queued: Include in the resulting list queued deployments. + + In Progress: Include in the resulting list in-progress deployments. + + Succeeded: Include in the resulting list succeeded deployments. 
+ + Failed: Include in the resulting list failed deployments. + + Aborted: Include in the resulting list aborted deployments. + + :type create_time_range: dict + :param create_time_range: A deployment creation start- and end-time + range for returning a subset of the list of deployments. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployments call, which can be used to return the next set of + deployments in the list. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if deployment_group_name is not None: + params['deploymentGroupName'] = deployment_group_name + if include_only_statuses is not None: + params['includeOnlyStatuses'] = include_only_statuses + if create_time_range is not None: + params['createTimeRange'] = create_time_range + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeployments', + body=json.dumps(params)) + + def register_application_revision(self, application_name, revision, + description=None): + """ + Registers with AWS CodeDeploy a revision for the specified + application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type description: string + :param description: A comment about the revision. + + :type revision: dict + :param revision: Information about the application revision to + register, including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + if description is not None: + params['description'] = description + return self.make_request(action='RegisterApplicationRevision', + body=json.dumps(params)) + + def stop_deployment(self, deployment_id): + """ + Attempts to stop an ongoing deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='StopDeployment', + body=json.dumps(params)) + + def update_application(self, application_name=None, + new_application_name=None): + """ + Changes an existing application's name. + + :type application_name: string + :param application_name: The current name of the application that you + want to change. + + :type new_application_name: string + :param new_application_name: The new name that you want to change the + application to. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if new_application_name is not None: + params['newApplicationName'] = new_application_name + return self.make_request(action='UpdateApplication', + body=json.dumps(params)) + + def update_deployment_group(self, application_name, + current_deployment_group_name, + new_deployment_group_name=None, + deployment_config_name=None, + ec_2_tag_filters=None, + auto_scaling_groups=None, + service_role_arn=None): + """ + Changes information about an existing deployment group. + + :type application_name: string + :param application_name: The application name corresponding to the + deployment group to update. + + :type current_deployment_group_name: string + :param current_deployment_group_name: The current name of the existing + deployment group. + + :type new_deployment_group_name: string + :param new_deployment_group_name: The new name of the deployment group, + if you want to change it. 
+ + :type deployment_config_name: string + :param deployment_config_name: The replacement deployment configuration + name to use, if you want to change it. + + :type ec_2_tag_filters: list + :param ec_2_tag_filters: The replacement set of Amazon EC2 tags to + filter on, if you want to change them. + + :type auto_scaling_groups: list + :param auto_scaling_groups: The replacement list of Auto Scaling groups + to be included in the deployment group, if you want to change them. + + :type service_role_arn: string + :param service_role_arn: A replacement service role's ARN, if you want + to change it. + + """ + params = { + 'applicationName': application_name, + 'currentDeploymentGroupName': current_deployment_group_name, + } + if new_deployment_group_name is not None: + params['newDeploymentGroupName'] = new_deployment_group_name + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if ec_2_tag_filters is not None: + params['ec2TagFilters'] = ec_2_tag_filters + if auto_scaling_groups is not None: + params['autoScalingGroups'] = auto_scaling_groups + if service_role_arn is not None: + params['serviceRoleArn'] = service_role_arn + return self.make_request(action='UpdateDeploymentGroup', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff -Nru python-boto-2.34.0/boto/cognito/identity/exceptions.py python-boto-2.38.0/boto/cognito/identity/exceptions.py --- python-boto-2.34.0/boto/cognito/identity/exceptions.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cognito/identity/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -20,6 +20,10 @@ pass +class DeveloperUserAlreadyRegisteredException(BotoServerError): + pass + + class TooManyRequestsException(BotoServerError): pass diff -Nru python-boto-2.34.0/boto/cognito/identity/layer1.py python-boto-2.38.0/boto/cognito/identity/layer1.py --- python-boto-2.34.0/boto/cognito/identity/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cognito/identity/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -31,19 +31,30 @@ class CognitoIdentityConnection(AWSQueryConnection): """ Amazon Cognito - Amazon Cognito is a web service that facilitates the delivery of - scoped, temporary credentials to mobile devices or other untrusted - environments. Amazon Cognito uniquely identifies a device or user - and supplies the user with a consistent identity throughout the - lifetime of an application. - - Amazon Cognito lets users authenticate with third-party identity - providers (Facebook, Google, or Login with Amazon). As a - developer, you decide which identity providers to trust. You can - also choose to support unauthenticated access from your - application. 
Your users are provided with Cognito tokens that - uniquely identify their device and any information provided about - third-party logins. + Amazon Cognito is a web service that delivers scoped temporary + credentials to mobile devices and other untrusted environments. + Amazon Cognito uniquely identifies a device and supplies the user + with a consistent identity over the lifetime of an application. + + Using Amazon Cognito, you can enable authentication with one or + more third-party identity providers (Facebook, Google, or Login + with Amazon), and you can also choose to support unauthenticated + access from your app. Cognito delivers a unique identifier for + each user and acts as an OpenID token provider trusted by AWS + Security Token Service (STS) to access temporary, limited- + privilege AWS credentials. + + To provide end-user credentials, first make an unsigned call to + GetId. If the end user is authenticated with one of the supported + identity providers, set the `Logins` map with the identity + provider token. `GetId` returns a unique identifier for the user. + + Next, make an unsigned call to GetOpenIdToken, which returns the + OpenID token necessary to call STS and retrieve AWS credentials. + This call expects the same `Logins` map as the `GetId` call, as + well as the `IdentityID` originally returned by `GetId`. The token + returned by `GetOpenIdToken` can be passed to the STS operation + `AssumeRoleWithWebIdentity`_ to retrieve AWS credentials. """ APIVersion = "2014-06-30" DefaultRegionName = "us-east-1" @@ -55,6 +66,7 @@ _faults = { "LimitExceededException": exceptions.LimitExceededException, "ResourceConflictException": exceptions.ResourceConflictException, + "DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException, "TooManyRequestsException": exceptions.TooManyRequestsException, "InvalidParameterException": exceptions.InvalidParameterException, "ResourceNotFoundException": exceptions.ResourceNotFoundException, @@ -80,11 +92,13 @@ def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, - supported_login_providers=None): + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): """ Creates a new identity pool. The identity pool is a store of user identity information that is specific to your AWS - account. + account. The limit on identity pools is 60 per account. :type identity_pool_name: string :param identity_pool_name: A string that you provide. @@ -97,6 +111,19 @@ :param supported_login_providers: Optional key:value pairs mapping provider names to provider app IDs. + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This name acts as a placeholder that allows + your backend and the Cognito service to communicate about the + developer provider. For the `DeveloperProviderName`, you can use + letters as well as period ( `.`), underscore ( `_`), and dash ( + `-`). + Once you have set a developer provider name, you cannot change it. + Please take care in setting this parameter. 
+ + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + """ params = { 'IdentityPoolName': identity_pool_name, @@ -104,6 +131,10 @@ } if supported_login_providers is not None: params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns return self.make_request(action='CreateIdentityPool', body=json.dumps(params)) @@ -146,8 +177,13 @@ :param identity_pool_id: An identity pool ID in the format REGION:GUID. :type logins: map - :param logins: A set of optional name/value pairs that map provider + :param logins: A set of optional name-value pairs that map provider names to provider tokens. + The available provider names for `Logins` are as follows: + + + Facebook: `graph.facebook.com` + + Google: `accounts.google.com` + + Amazon: `www.amazon.com` """ params = { @@ -162,15 +198,17 @@ def get_open_id_token(self, identity_id, logins=None): """ Gets an OpenID token, using a known Cognito ID. This known - Cognito ID is returned from GetId. You can optionally add + Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link. + The OpenId token is valid for 15 minutes. + :type identity_id: string :param identity_id: A unique identifier in the format REGION:GUID. :type logins: map - :param logins: A set of optional name/value pairs that map provider + :param logins: A set of optional name-value pairs that map provider names to provider tokens. """ @@ -180,6 +218,69 @@ return self.make_request(action='GetOpenIdToken', body=json.dumps(params)) + def get_open_id_token_for_developer_identity(self, identity_pool_id, + logins, identity_id=None, + token_duration=None): + """ + Registers (or retrieves) a Cognito `IdentityId` and an OpenID + Connect token for a user authenticated by your backend + authentication process. Supplying multiple logins will create + an implicit linked account. You can only specify one developer + provider as part of the `Logins` map, which is linked to the + identity pool. The developer provider is the "domain" by which + Cognito will refer to your users. + + You can use `GetOpenIdTokenForDeveloperIdentity` to create a + new identity and to link new logins (that is, user credentials + issued by a public provider or developer provider) to an + existing identity. When you want to create a new identity, the + `IdentityId` should be null. When you want to associate a new + login with an existing authenticated/unauthenticated identity, + you can do so by providing the existing `IdentityId`. This API + will create the identity in the specified `IdentityPoolId`. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. Each name-value pair represents a user + from a public provider or developer provider. If the user is from a + developer provider, the name-value pair will follow the syntax + `"developer_provider_name": "developer_user_identifier"`. 
The + developer provider is the "domain" by which Cognito will refer to + your users; you provided this domain while creating/updating the + identity pool. The developer user identifier is an identifier from + your backend that uniquely identifies a user. When you create an + identity pool, you can specify the supported logins. + + :type token_duration: long + :param token_duration: The expiration time of the token, in seconds. + You can specify a custom expiration time for the token so that you + can cache it. If you don't provide an expiration time, the token is + valid for 15 minutes. You can exchange the token with Amazon STS + for temporary AWS credentials, which are valid for a maximum of one + hour. The maximum token duration you can set is 24 hours. You + should take care in setting the expiration time for a token, as + there are significant security implications: an attacker could use + a leaked token to access your AWS resources for the token's + duration. + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'Logins': logins, + } + if identity_id is not None: + params['IdentityId'] = identity_id + if token_duration is not None: + params['TokenDuration'] = token_duration + return self.make_request(action='GetOpenIdTokenForDeveloperIdentity', + body=json.dumps(params)) + def list_identities(self, identity_pool_id, max_results, next_token=None): """ Lists the identities in a pool. @@ -221,6 +322,138 @@ return self.make_request(action='ListIdentityPools', body=json.dumps(params)) + def lookup_developer_identity(self, identity_pool_id, identity_id=None, + developer_user_identifier=None, + max_results=None, next_token=None): + """ + Retrieves the `IdentityID` associated with a + `DeveloperUserIdentifier` or the list of + `DeveloperUserIdentifier`s associated with an `IdentityId` for + an existing identity. Either `IdentityID` or + `DeveloperUserIdentifier` must not be null. If you supply only + one of these values, the other value will be searched in the + database and returned as a part of the response. If you supply + both, `DeveloperUserIdentifier` will be matched against + `IdentityID`. If the values are verified against the database, + the response returns both values and is the same as the + request. Otherwise a `ResourceConflictException` is thrown. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. Typically, a developer + identity provider would issue many developer user identifiers, in + keeping with the number of users. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. The first call you make will + have `NextToken` set to null. After that the service will return + `NextToken` values as needed. For example, let's say you make a + request with `MaxResults` set to 10, and there are 20 matches in + the database. The service will return a pagination token as a part + of the response. This token can be used to call the API again and + get results starting from the 11th match. 
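A hedged sketch of the developer-identity flow described above (the pool ID, provider name, and user identifier are placeholders; the IdentityId and Token response keys are assumed from the GetOpenIdTokenForDeveloperIdentity API):

    import boto.cognito.identity

    conn = boto.cognito.identity.connect_to_region('us-east-1')
    resp = conn.get_open_id_token_for_developer_identity(
        identity_pool_id='us-east-1:EXAMPLE-GUID',
        logins={'login.mycompany.myapp': 'user-1234'},
        token_duration=3600)  # cache the token for up to an hour
    identity_id = resp['IdentityId']
    openid_token = resp['Token']

The returned token can then be traded to STS via AssumeRoleWithWebIdentity, exactly as in the public-provider flow sketched in the class docstring.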
+ + """ + params = {'IdentityPoolId': identity_pool_id, } + if identity_id is not None: + params['IdentityId'] = identity_id + if developer_user_identifier is not None: + params['DeveloperUserIdentifier'] = developer_user_identifier + if max_results is not None: + params['MaxResults'] = max_results + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='LookupDeveloperIdentity', + body=json.dumps(params)) + + def merge_developer_identities(self, source_user_identifier, + destination_user_identifier, + developer_provider_name, identity_pool_id): + """ + Merges two users having different `IdentityId`s, existing in + the same identity pool, and identified by the same developer + provider. You can use this action to request that discrete + users be merged and identified as a single user in the Cognito + environment. Cognito associates the given source user ( + `SourceUserIdentifier`) with the `IdentityId` of the + `DestinationUserIdentifier`. Only developer-authenticated + users can be merged. If the users to be merged are associated + with the same public provider, but as two different users, an + exception will be thrown. + + :type source_user_identifier: string + :param source_user_identifier: User identifier for the source user. The + value should be a `DeveloperUserIdentifier`. + + :type destination_user_identifier: string + :param destination_user_identifier: User identifier for the destination + user. The value should be a `DeveloperUserIdentifier`. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This is a (pseudo) domain name that you + provide while creating an identity pool. This name acts as a + placeholder that allows your backend and the Cognito service to + communicate about the developer provider. For the + `DeveloperProviderName`, you can use letters as well as period (.), + underscore (_), and dash (-). + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + """ + params = { + 'SourceUserIdentifier': source_user_identifier, + 'DestinationUserIdentifier': destination_user_identifier, + 'DeveloperProviderName': developer_provider_name, + 'IdentityPoolId': identity_pool_id, + } + return self.make_request(action='MergeDeveloperIdentities', + body=json.dumps(params)) + + def unlink_developer_identity(self, identity_id, identity_pool_id, + developer_provider_name, + developer_user_identifier): + """ + Unlinks a `DeveloperUserIdentifier` from an existing identity. + Unlinked developer users will be considered new identities + next time they are seen. If, for a given Cognito identity, you + remove all federated identities as well as the developer user + identifier, the Cognito identity becomes inaccessible. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. 
+ + """ + params = { + 'IdentityId': identity_id, + 'IdentityPoolId': identity_pool_id, + 'DeveloperProviderName': developer_provider_name, + 'DeveloperUserIdentifier': developer_user_identifier, + } + return self.make_request(action='UnlinkDeveloperIdentity', + body=json.dumps(params)) + def unlink_identity(self, identity_id, logins, logins_to_remove): """ Unlinks a federated identity from an existing account. @@ -232,7 +465,7 @@ :param identity_id: A unique identifier in the format REGION:GUID. :type logins: map - :param logins: A set of optional name/value pairs that map provider + :param logins: A set of optional name-value pairs that map provider names to provider tokens. :type logins_to_remove: list @@ -249,7 +482,9 @@ def update_identity_pool(self, identity_pool_id, identity_pool_name, allow_unauthenticated_identities, - supported_login_providers=None): + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): """ Updates a user pool. @@ -267,6 +502,13 @@ :param supported_login_providers: Optional key:value pairs mapping provider names to provider app IDs. + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. + + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + """ params = { 'IdentityPoolId': identity_pool_id, @@ -275,6 +517,10 @@ } if supported_login_providers is not None: params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns return self.make_request(action='UpdateIdentityPool', body=json.dumps(params)) diff -Nru python-boto-2.34.0/boto/cognito/sync/exceptions.py python-boto-2.38.0/boto/cognito/sync/exceptions.py --- python-boto-2.34.0/boto/cognito/sync/exceptions.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cognito/sync/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -30,6 +30,10 @@ pass +class InvalidConfigurationException(BotoServerError): + pass + + class TooManyRequestsException(BotoServerError): pass diff -Nru python-boto-2.34.0/boto/cognito/sync/layer1.py python-boto-2.38.0/boto/cognito/sync/layer1.py --- python-boto-2.34.0/boto/cognito/sync/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/cognito/sync/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -39,6 +39,11 @@ user ID and credentials. User data is persisted in a dataset that can store up to 1 MB of key-value pairs, and you can have up to 20 datasets per user identity. + + With Amazon Cognito Sync, the data stored for each identity is + accessible only to credentials assigned to that identity. In order + to use the Cognito Sync service, you need to make API calls using + credentials retrieved with `Amazon Cognito Identity service`_. """ APIVersion = "2014-06-30" DefaultRegionName = "us-east-1" @@ -48,6 +53,7 @@ _faults = { "LimitExceededException": exceptions.LimitExceededException, "ResourceConflictException": exceptions.ResourceConflictException, + "InvalidConfigurationException": exceptions.InvalidConfigurationException, "TooManyRequestsException": exceptions.TooManyRequestsException, "InvalidParameterException": exceptions.InvalidParameterException, "ResourceNotFoundException": exceptions.ResourceNotFoundException, @@ -94,6 +100,7 @@ (dot). 
""" + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( identity_pool_id, identity_id, dataset_name) return self.make_request('DELETE', uri, expected_status=200) @@ -101,6 +108,11 @@ def describe_dataset(self, identity_pool_id, identity_id, dataset_name): """ Gets metadata about a dataset by identity and dataset name. + The credentials used to make this API call need to have access + to the identity data. With Amazon Cognito Sync, each identity + has access only to its own data. You should use Amazon Cognito + Identity service to retrieve the credentials necessary to make + this API call. :type identity_pool_id: string :param identity_pool_id: A name-spaced GUID (for example, us- @@ -118,6 +130,7 @@ (dot). """ + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( identity_pool_id, identity_id, dataset_name) return self.make_request('GET', uri, expected_status=200) @@ -133,6 +146,7 @@ Cognito. GUID generation is unique within a region. """ + uri = '/identitypools/{0}'.format(identity_pool_id) return self.make_request('GET', uri, expected_status=200) @@ -152,14 +166,34 @@ Cognito. GUID generation is unique within a region. """ + uri = '/identitypools/{0}/identities/{1}'.format( identity_pool_id, identity_id) return self.make_request('GET', uri, expected_status=200) + def get_identity_pool_configuration(self, identity_pool_id): + """ + Gets the configuration settings of an identity pool. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. This is the ID of the pool for which to return a + configuration. + + """ + + uri = '/identitypools/{0}/configuration'.format(identity_pool_id) + return self.make_request('GET', uri, expected_status=200) + def list_datasets(self, identity_pool_id, identity_id, next_token=None, max_results=None): """ - Lists datasets for an identity. + Lists datasets for an identity. The credentials used to make + this API call need to have access to the identity data. With + Amazon Cognito Sync, each identity has access only to its own + data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. :type identity_pool_id: string :param identity_pool_id: A name-spaced GUID (for example, us- @@ -179,12 +213,19 @@ :param max_results: The maximum number of results to be returned. """ + uri = '/identitypools/{0}/identities/{1}/datasets'.format( identity_pool_id, identity_id) params = {} headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results return self.make_request('GET', uri, expected_status=200, - data=json.dumps(params), headers=headers) + data=json.dumps(params), headers=headers, + params=query_params) def list_identity_pool_usage(self, next_token=None, max_results=None): """ @@ -198,18 +239,29 @@ :param max_results: The maximum number of results to be returned. 
""" + uri = '/identitypools' params = {} headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results return self.make_request('GET', uri, expected_status=200, - data=json.dumps(params), headers=headers) + data=json.dumps(params), headers=headers, + params=query_params) def list_records(self, identity_pool_id, identity_id, dataset_name, last_sync_count=None, next_token=None, max_results=None, sync_session_token=None): """ Gets paginated records, optionally changed after a particular - sync count for a dataset and identity. + sync count for a dataset and identity. The credentials used to + make this API call need to have access to the identity data. + With Amazon Cognito Sync, each identity has access only to its + own data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. :type identity_pool_id: string :param identity_pool_id: A name-spaced GUID (for example, us- @@ -241,19 +293,142 @@ ID, and expiration. """ + uri = '/identitypools/{0}/identities/{1}/datasets/{2}/records'.format( identity_pool_id, identity_id, dataset_name) params = {} headers = {} + query_params = {} + if last_sync_count is not None: + query_params['lastSyncCount'] = last_sync_count + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + if sync_session_token is not None: + query_params['syncSessionToken'] = sync_session_token return self.make_request('GET', uri, expected_status=200, - data=json.dumps(params), headers=headers) + data=json.dumps(params), headers=headers, + params=query_params) + + def register_device(self, identity_pool_id, identity_id, platform, token): + """ + Registers a device to receive push sync notifications. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. Here, the ID of the pool that the identity belongs to. + + :type identity_id: string + :param identity_id: The unique ID for this identity. + + :type platform: string + :param platform: The SNS platform type (e.g. GCM, SDM, APNS, + APNS_SANDBOX). + + :type token: string + :param token: The push token. + + """ + + uri = '/identitypools/{0}/identity/{1}/device'.format( + identity_pool_id, identity_id) + params = {'Platform': platform, 'Token': token, } + headers = {} + query_params = {} + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def set_identity_pool_configuration(self, identity_pool_id, + push_sync=None): + """ + Sets the necessary configuration for push sync. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. This is the ID of the pool to modify. + + :type push_sync: dict + :param push_sync: Configuration options to be applied to the identity + pool. 
+ + """ + + uri = '/identitypools/{0}/configuration'.format(identity_pool_id) + params = {} + headers = {} + query_params = {} + if push_sync is not None: + params['PushSync'] = push_sync + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def subscribe_to_dataset(self, identity_pool_id, identity_id, + dataset_name, device_id): + """ + Subscribes to receive notifications when a dataset is modified + by another device. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. The ID of the pool to which the identity belongs. + + :type identity_id: string + :param identity_id: Unique ID for this identity. + + :type dataset_name: string + :param dataset_name: The name of the dataset to subcribe to. + + :type device_id: string + :param device_id: The unique ID generated for this device by Cognito. + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format( + identity_pool_id, identity_id, dataset_name, device_id) + return self.make_request('POST', uri, expected_status=200) + + def unsubscribe_from_dataset(self, identity_pool_id, identity_id, + dataset_name, device_id): + """ + Unsubscribe from receiving notifications when a dataset is + modified by another device. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. The ID of the pool to which this identity belongs. + + :type identity_id: string + :param identity_id: Unique ID for this identity. + + :type dataset_name: string + :param dataset_name: The name of the dataset from which to unsubcribe. + + :type device_id: string + :param device_id: The unique ID generated for this device by Cognito. + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format( + identity_pool_id, identity_id, dataset_name, device_id) + return self.make_request('DELETE', uri, expected_status=200) def update_records(self, identity_pool_id, identity_id, dataset_name, - sync_session_token, record_patches=None, - client_context=None): + sync_session_token, device_id=None, + record_patches=None, client_context=None): """ Posts updates to records and add and delete records for a - dataset and user. + dataset and user. The credentials used to make this API call + need to have access to the identity data. With Amazon Cognito + Sync, each identity has access only to its own data. You + should use Amazon Cognito Identity service to retrieve the + credentials necessary to make this API call. :type identity_pool_id: string :param identity_pool_id: A name-spaced GUID (for example, us- @@ -270,27 +445,39 @@ characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot). + :type device_id: string + :param device_id: The unique ID generated for this device by Cognito. + :type record_patches: list - :param record_patches: + :param record_patches: A list of patch operations. :type sync_session_token: string :param sync_session_token: The SyncSessionToken returned by a previous call to ListRecords for this dataset and identity. :type client_context: string - :param client_context: + :param client_context: Intended to supply a device ID that will + populate the `lastModifiedBy` field referenced in other methods. + The `ClientContext` field is not yet implemented. 
""" + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( identity_pool_id, identity_id, dataset_name) params = {'SyncSessionToken': sync_session_token, } headers = {} + query_params = {} + if device_id is not None: + params['DeviceId'] = device_id if record_patches is not None: params['RecordPatches'] = record_patches if client_context is not None: headers['x-amz-Client-Context'] = client_context + if client_context is not None: + headers['x-amz-Client-Context'] = client_context return self.make_request('POST', uri, expected_status=200, - data=json.dumps(params), headers=headers) + data=json.dumps(params), headers=headers, + params=query_params) def make_request(self, verb, resource, headers=None, data='', expected_status=None, params=None): diff -Nru python-boto-2.34.0/boto/configservice/exceptions.py python-boto-2.38.0/boto/configservice/exceptions.py --- python-boto-2.34.0/boto/configservice/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/configservice/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,103 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from boto.exception import BotoServerError + + +class InvalidLimitException(BotoServerError): + pass + + +class NoSuchBucketException(BotoServerError): + pass + + +class InvalidSNSTopicARNException(BotoServerError): + pass + + +class ResourceNotDiscoveredException(BotoServerError): + pass + + +class MaxNumberOfDeliveryChannelsExceededException(BotoServerError): + pass + + +class LastDeliveryChannelDeleteFailedException(BotoServerError): + pass + + +class InsufficientDeliveryPolicyException(BotoServerError): + pass + + +class InvalidRoleException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class NoSuchDeliveryChannelException(BotoServerError): + pass + + +class NoSuchConfigurationRecorderException(BotoServerError): + pass + + +class InvalidS3KeyPrefixException(BotoServerError): + pass + + +class InvalidDeliveryChannelNameException(BotoServerError): + pass + + +class NoRunningConfigurationRecorderException(BotoServerError): + pass + + +class ValidationException(BotoServerError): + pass + + +class NoAvailableConfigurationRecorderException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass + + +class InvalidConfigurationRecorderNameException(BotoServerError): + pass + + +class NoAvailableDeliveryChannelException(BotoServerError): + pass + + +class MaxNumberOfConfigurationRecordersExceededException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/configservice/__init__.py python-boto-2.38.0/boto/configservice/__init__.py --- python-boto-2.34.0/boto/configservice/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/configservice/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Config service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.configservice.layer1 import ConfigServiceConnection + return get_regions('configservice', connection_cls=ConfigServiceConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/configservice/layer1.py python-boto-2.38.0/boto/configservice/layer1.py --- python-boto-2.34.0/boto/configservice/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/configservice/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,381 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.configservice import exceptions + + +class ConfigServiceConnection(AWSQueryConnection): + """ + AWS Config + AWS Config provides a way to keep track of the configurations of + all the AWS resources associated with your AWS account. You can + use AWS Config to get the current and historical configurations of + each AWS resource and also to get information about the + relationship between the resources. An AWS resource can be an + Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store + (EBS) volume, an Elastic network Interface (ENI), or a security + group. For a complete list of resources currently supported by AWS + Config, see `Supported AWS Resources`_. + + You can access and manage AWS Config through the AWS Management + Console, the AWS Command Line Interface (AWS CLI), the AWS Config + API, or the AWS SDKs for AWS Config + + This reference guide contains documentation for the AWS Config API + and the AWS CLI commands that you can use to manage AWS Config. + + The AWS Config API uses the Signature Version 4 protocol for + signing requests. For more information about how to sign a request + with this protocol, see `Signature Version 4 Signing Process`_. + + For detailed information about AWS Config features and their + associated actions or commands, as well as how to work with AWS + Management Console, see `What Is AWS Config?`_ in the AWS Config + Developer Guide . 
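A short usage sketch for the two helpers just defined; the region name is illustrative and credentials are assumed to come from the standard boto configuration lookup:

    from boto.configservice import connect_to_region, regions

    print([r.name for r in regions()])     # every region known for AWS Config
    conn = connect_to_region('us-east-1')  # a ConfigServiceConnection
    if conn is None:
        raise ValueError('not a known AWS Config region')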
+ """ + APIVersion = "2014-11-12" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "config.us-east-1.amazonaws.com" + ServiceName = "ConfigService" + TargetPrefix = "StarlingDoveService" + ResponseError = JSONResponseError + + _faults = { + "InvalidLimitException": exceptions.InvalidLimitException, + "NoSuchBucketException": exceptions.NoSuchBucketException, + "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException, + "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException, + "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException, + "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException, + "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException, + "InvalidRoleException": exceptions.InvalidRoleException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException, + "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException, + "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException, + "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException, + "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException, + "ValidationException": exceptions.ValidationException, + "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException, + "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException, + "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(ConfigServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def delete_delivery_channel(self, delivery_channel_name): + """ + Deletes the specified delivery channel. + + The delivery channel cannot be deleted if it is the only + delivery channel and the configuration recorder is still + running. To delete the delivery channel, stop the running + configuration recorder using the StopConfigurationRecorder + action. + + :type delivery_channel_name: string + :param delivery_channel_name: The name of the delivery channel to + delete. + + """ + params = {'DeliveryChannelName': delivery_channel_name, } + return self.make_request(action='DeleteDeliveryChannel', + body=json.dumps(params)) + + def deliver_config_snapshot(self, delivery_channel_name): + """ + Schedules delivery of a configuration snapshot to the Amazon + S3 bucket in the specified delivery channel. After the + delivery has started, AWS Config sends following notifications + using an Amazon SNS topic that you have specified. + + + + Notification of starting the delivery. + + Notification of delivery completed, if the delivery was + successfully completed. + + Notification of delivery failure, if the delivery failed to + complete. 
+ + :type delivery_channel_name: string + :param delivery_channel_name: The name of the delivery channel through + which the snapshot is delivered. + + """ + params = {'deliveryChannelName': delivery_channel_name, } + return self.make_request(action='DeliverConfigSnapshot', + body=json.dumps(params)) + + def describe_configuration_recorder_status(self, + configuration_recorder_names=None): + """ + Returns the current status of the specified configuration + recorder. If a configuration recorder is not specified, this + action returns the status of all configuration recorder + associated with the account. + + :type configuration_recorder_names: list + :param configuration_recorder_names: The name(s) of the configuration + recorder. If the name is not specified, the action returns the + current status of all the configuration recorders associated with + the account. + + """ + params = {} + if configuration_recorder_names is not None: + params['ConfigurationRecorderNames'] = configuration_recorder_names + return self.make_request(action='DescribeConfigurationRecorderStatus', + body=json.dumps(params)) + + def describe_configuration_recorders(self, + configuration_recorder_names=None): + """ + Returns the name of one or more specified configuration + recorders. If the recorder name is not specified, this action + returns the names of all the configuration recorders + associated with the account. + + :type configuration_recorder_names: list + :param configuration_recorder_names: A list of configuration recorder + names. + + """ + params = {} + if configuration_recorder_names is not None: + params['ConfigurationRecorderNames'] = configuration_recorder_names + return self.make_request(action='DescribeConfigurationRecorders', + body=json.dumps(params)) + + def describe_delivery_channel_status(self, delivery_channel_names=None): + """ + Returns the current status of the specified delivery channel. + If a delivery channel is not specified, this action returns + the current status of all delivery channels associated with + the account. + + :type delivery_channel_names: list + :param delivery_channel_names: A list of delivery channel names. + + """ + params = {} + if delivery_channel_names is not None: + params['DeliveryChannelNames'] = delivery_channel_names + return self.make_request(action='DescribeDeliveryChannelStatus', + body=json.dumps(params)) + + def describe_delivery_channels(self, delivery_channel_names=None): + """ + Returns details about the specified delivery channel. If a + delivery channel is not specified, this action returns the + details of all delivery channels associated with the account. + + :type delivery_channel_names: list + :param delivery_channel_names: A list of delivery channel names. + + """ + params = {} + if delivery_channel_names is not None: + params['DeliveryChannelNames'] = delivery_channel_names + return self.make_request(action='DescribeDeliveryChannels', + body=json.dumps(params)) + + def get_resource_config_history(self, resource_type, resource_id, + later_time=None, earlier_time=None, + chronological_order=None, limit=None, + next_token=None): + """ + Returns a list of configuration items for the specified + resource. The list contains details about each state of the + resource during the specified time interval. You can specify a + `limit` on the number of results returned on the page. If a + limit is specified, a `nextToken` is returned as part of the + result that you can use to continue this request. 
+ + :type resource_type: string + :param resource_type: The resource type. + + :type resource_id: string + :param resource_id: The ID of the resource (for example., `sg-xxxxxx`). + + :type later_time: timestamp + :param later_time: The time stamp that indicates a later time. If not + specified, current time is taken. + + :type earlier_time: timestamp + :param earlier_time: The time stamp that indicates an earlier time. If + not specified, the action returns paginated results that contain + configuration items that start from when the first configuration + item was recorded. + + :type chronological_order: string + :param chronological_order: The chronological order for configuration + items listed. By default the results are listed in reverse + chronological order. + + :type limit: integer + :param limit: The maximum number of configuration items returned in + each page. The default is 10. You cannot specify a limit greater + than 100. + + :type next_token: string + :param next_token: An optional parameter used for pagination of the + results. + + """ + params = { + 'resourceType': resource_type, + 'resourceId': resource_id, + } + if later_time is not None: + params['laterTime'] = later_time + if earlier_time is not None: + params['earlierTime'] = earlier_time + if chronological_order is not None: + params['chronologicalOrder'] = chronological_order + if limit is not None: + params['limit'] = limit + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='GetResourceConfigHistory', + body=json.dumps(params)) + + def put_configuration_recorder(self, configuration_recorder): + """ + Creates a new configuration recorder to record the resource + configurations. + + You can use this action to change the role ( `roleARN`) of an + existing recorder. To change the role, call the action on the + existing configuration recorder and specify a role. + + :type configuration_recorder: dict + :param configuration_recorder: The configuration recorder object that + records each configuration change made to the resources. + + """ + params = {'ConfigurationRecorder': configuration_recorder, } + return self.make_request(action='PutConfigurationRecorder', + body=json.dumps(params)) + + def put_delivery_channel(self, delivery_channel): + """ + Creates a new delivery channel object to deliver the + configuration information to an Amazon S3 bucket, and to an + Amazon SNS topic. + + You can use this action to change the Amazon S3 bucket or an + Amazon SNS topic of the existing delivery channel. To change + the Amazon S3 bucket or an Amazon SNS topic, call this action + and specify the changed values for the S3 bucket and the SNS + topic. If you specify a different value for either the S3 + bucket or the SNS topic, this action will keep the existing + value for the parameter that is not changed. + + :type delivery_channel: dict + :param delivery_channel: The configuration delivery channel object that + delivers the configuration information to an Amazon S3 bucket, and + to an Amazon SNS topic. + + """ + params = {'DeliveryChannel': delivery_channel, } + return self.make_request(action='PutDeliveryChannel', + body=json.dumps(params)) + + def start_configuration_recorder(self, configuration_recorder_name): + """ + Starts recording configurations of all the resources + associated with the account. + + You must have created at least one delivery channel to + successfully start the configuration recorder. 
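A pagination sketch for get_resource_config_history, following the limit/nextToken contract described above. The instance ID is made up, and the configurationItems/nextToken response keys are assumptions based on the service's documented response shape:

    from boto.configservice import connect_to_region

    conn = connect_to_region('us-east-1')
    items, token = [], None
    while True:
        page = conn.get_resource_config_history(
            resource_type='AWS::EC2::Instance',
            resource_id='i-1a2b3c4d',
            limit=10,
            next_token=token)
        items.extend(page.get('configurationItems', []))
        token = page.get('nextToken')
        if not token:
            break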
+ + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StartConfigurationRecorder', + body=json.dumps(params)) + + def stop_configuration_recorder(self, configuration_recorder_name): + """ + Stops recording configurations of all the resources associated + with the account. + + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StopConfigurationRecorder', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff -Nru python-boto-2.34.0/boto/connection.py python-boto-2.38.0/boto/connection.py --- python-boto-2.34.0/boto/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -381,7 +381,7 @@ if 'Content-Length' not in self.headers: if 'Transfer-Encoding' not in self.headers or \ self.headers['Transfer-Encoding'] != 'chunked': - self.headers['Content-Length'] = len(self.body) + self.headers['Content-Length'] = str(len(self.body)) class HTTPResponse(http_client.HTTPResponse): @@ -903,7 +903,7 @@ boto.log.debug('Params: %s' % request.params) response = None body = None - e = None + ex = None if override_num_retries is None: num_retries = config.getint('Boto', 'num_retries', self.num_retries) else: @@ -931,7 +931,8 @@ # not include the port. 
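The e-to-ex rename in the _mexe retry loop above matters under Python 3, where the name bound by except ... as e is unbound again when the except block ends; the old elif e: raise path could therefore fail with a NameError once the retries were exhausted. A minimal standalone model of the fixed pattern (both function names are invented):

    def flaky_request(i):
        raise IOError('simulated transient failure %d' % i)

    def fetch_with_retries(attempts=3):
        ex = None
        for i in range(attempts):
            try:
                return flaky_request(i)
            except IOError as e:
                ex = e    # keep a reference that outlives the except block
        raise ex          # re-raise the last failure after all retries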
if 's3' not in self._required_auth_capability(): if not getattr(self, 'anon', False): - self.set_host_header(request) + if not request.headers.get('Host'): + self.set_host_header(request) boto.log.debug('Final headers: %s' % request.headers) request.start_time = datetime.now() if callable(sender): @@ -1002,6 +1003,7 @@ connection = self.new_http_connection(request.host, request.port, self.is_secure) response = e.response + ex = e except self.http_exceptions as e: for unretryable in self.http_unretryable_exceptions: if isinstance(e, unretryable): @@ -1013,6 +1015,7 @@ e.__class__.__name__) connection = self.new_http_connection(request.host, request.port, self.is_secure) + ex = e time.sleep(next_sleep) i += 1 # If we made it here, it's because we have exhausted our retries @@ -1023,8 +1026,8 @@ self.request_hook.handle_request_data(request, response, error=True) if response: raise BotoServerError(response.status, response.reason, body) - elif e: - raise + elif ex: + raise ex else: msg = 'Please report this exception as a Boto Issue!' raise BotoClientError(msg) @@ -1084,7 +1087,7 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=None, debug=0, https_connection_factory=None, path='/', security_token=None, - validate_certs=True, profile_name=None): + validate_certs=True, profile_name=None, provider='aws'): super(AWSQueryConnection, self).__init__( host, aws_access_key_id, aws_secret_access_key, @@ -1093,7 +1096,8 @@ debug, https_connection_factory, path, security_token=security_token, validate_certs=validate_certs, - profile_name=profile_name) + profile_name=profile_name, + provider=provider) def _required_auth_capability(self): return [] diff -Nru python-boto-2.34.0/boto/dynamodb/layer2.py python-boto-2.38.0/boto/dynamodb/layer2.py --- python-boto-2.34.0/boto/dynamodb/layer2.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb/layer2.py 2015-04-09 18:57:51.000000000 +0000 @@ -26,7 +26,7 @@ from boto.dynamodb.item import Item from boto.dynamodb.batch import BatchList, BatchWriteList from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \ - LossyFloatDynamizer + LossyFloatDynamizer, NonBooleanDynamizer class TableGenerator(object): @@ -154,7 +154,7 @@ profile_name=profile_name) self.dynamizer = dynamizer() - def use_decimals(self): + def use_decimals(self, use_boolean=False): """ Use the ``decimal.Decimal`` type for encoding/decoding numeric types. @@ -164,7 +164,7 @@ """ # Eventually this should be made the default dynamizer. 
- self.dynamizer = Dynamizer() + self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer() def dynamize_attribute_updates(self, pending_updates): """ diff -Nru python-boto-2.34.0/boto/dynamodb/types.py python-boto-2.38.0/boto/dynamodb/types.py --- python-boto-2.34.0/boto/dynamodb/types.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb/types.py 2015-04-09 18:57:51.000000000 +0000 @@ -27,6 +27,7 @@ import base64 from decimal import (Decimal, DecimalException, Context, Clamped, Overflow, Inexact, Underflow, Rounded) +from collections import Mapping from boto.dynamodb.exceptions import DynamoDBNumberError from boto.compat import filter, map, six, long_type @@ -51,8 +52,12 @@ return result -def is_num(n): - types = (int, long_type, float, bool, Decimal) +def is_num(n, boolean_as_int=True): + if boolean_as_int: + types = (int, long_type, float, Decimal, bool) + else: + types = (int, long_type, float, Decimal) + return isinstance(n, types) or n in types @@ -94,15 +99,20 @@ return Binary(base64.b64decode(n)) -def get_dynamodb_type(val): +def get_dynamodb_type(val, use_boolean=True): """ Take a scalar Python value and return a string representing the corresponding Amazon DynamoDB type. If the value passed in is not a supported type, raise a TypeError. """ dynamodb_type = None - if is_num(val): - dynamodb_type = 'N' + if val is None: + dynamodb_type = 'NULL' + elif is_num(val): + if isinstance(val, bool) and use_boolean: + dynamodb_type = 'BOOL' + else: + dynamodb_type = 'N' elif is_str(val): dynamodb_type = 'S' elif isinstance(val, (set, frozenset)): @@ -114,6 +124,10 @@ dynamodb_type = 'BS' elif is_binary(val): dynamodb_type = 'B' + elif isinstance(val, Mapping): + dynamodb_type = 'M' + elif isinstance(val, list): + dynamodb_type = 'L' if dynamodb_type is None: msg = 'Unsupported type "%s" for value "%s"' % (type(val), val) raise TypeError(msg) @@ -301,6 +315,18 @@ def _encode_bs(self, attr): return [self._encode_b(n) for n in attr] + def _encode_null(self, attr): + return True + + def _encode_bool(self, attr): + return attr + + def _encode_m(self, attr): + return dict([(k, self.encode(v)) for k, v in attr.items()]) + + def _encode_l(self, attr): + return [self.encode(i) for i in attr] + def decode(self, attr): """ Takes the format returned by DynamoDB and constructs @@ -338,8 +364,29 @@ def _decode_bs(self, attr): return set(map(self._decode_b, attr)) + def _decode_null(self, attr): + return None + + def _decode_bool(self, attr): + return attr + + def _decode_m(self, attr): + return dict([(k, self.decode(v)) for k, v in attr.items()]) + + def _decode_l(self, attr): + return [self.decode(i) for i in attr] + + +class NonBooleanDynamizer(Dynamizer): + """Casting boolean type to numeric types. + + This class is provided for backward compatibility. + """ + def _get_dynamodb_type(self, attr): + return get_dynamodb_type(attr, use_boolean=False) + -class LossyFloatDynamizer(Dynamizer): +class LossyFloatDynamizer(NonBooleanDynamizer): """Use float/int instead of Decimal for numeric types. This class is provided for backwards compatibility. Instead of diff -Nru python-boto-2.34.0/boto/dynamodb2/items.py python-boto-2.38.0/boto/dynamodb2/items.py --- python-boto-2.34.0/boto/dynamodb2/items.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb2/items.py 2015-04-09 18:57:51.000000000 +0000 @@ -1,7 +1,5 @@ from copy import deepcopy -from boto.dynamodb2.types import Dynamizer - class NEWVALUE(object): # A marker for new data added. 
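The types.py changes above are easiest to see side by side: the full Dynamizer now emits the native BOOL, NULL, M, and L DynamoDB types, while the new NonBooleanDynamizer preserves the old bool-as-number behaviour. The comments show what the code above should produce:

    from boto.dynamodb.types import Dynamizer, NonBooleanDynamizer

    full = Dynamizer()
    print(full.encode(True))                # {'BOOL': True}
    print(full.encode(None))                # {'NULL': True}
    print(full.encode({'tags': ['a', 1]}))  # nested 'M'/'L' encoding
    legacy = NonBooleanDynamizer()
    print(legacy.encode(True))              # {'N': '1'}: bools stay numeric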
@@ -35,7 +33,8 @@ being table-level. It's also for persisting schema around many objects. Optionally accepts a ``data`` parameter, which should be a dictionary - of the fields & values of the item. + of the fields & values of the item. Alternatively, an ``Item`` instance + may be provided from which to extract the data. Optionally accepts a ``loaded`` parameter, which should be a boolean. ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if @@ -69,8 +68,10 @@ self._loaded = loaded self._orig_data = {} self._data = data - self._dynamizer = Dynamizer() + self._dynamizer = table._dynamizer + if isinstance(self._data, Item): + self._data = self._data._data if self._data is None: self._data = {} diff -Nru python-boto-2.34.0/boto/dynamodb2/layer1.py python-boto-2.38.0/boto/dynamodb2/layer1.py --- python-boto-2.34.0/boto/dynamodb2/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb2/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -31,7 +31,9 @@ class DynamoDBConnection(AWSQueryConnection): """ - Amazon DynamoDB **Overview** + Amazon DynamoDB + **Overview** + This is the Amazon DynamoDB API Reference. This guide provides descriptions and samples of the low-level DynamoDB API. For information about DynamoDB application development, go to the @@ -57,7 +59,6 @@ **Managing Tables** - + CreateTable - Creates a table with user-specified provisioned throughput settings. You must designate one attribute as the hash primary key for the table; you can optionally designate a second @@ -75,14 +76,12 @@ + DeleteTable - Deletes a table and all of its indexes. - For conceptual information about managing tables, go to `Working with Tables`_ in the Amazon DynamoDB Developer Guide . **Reading Data** - + GetItem - Returns a set of attributes for the item that has a given primary key. By default, GetItem performs an eventually consistent read; however, applications can specify a strongly @@ -106,7 +105,6 @@ case that requires predictable performance. - For conceptual information about reading data, go to `Working with Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB Developer Guide . @@ -114,7 +112,6 @@ **Modifying Data** - + PutItem - Creates a new item, or replaces an existing item with a new item (including all the attributes). By default, if an item in the table already exists with the same primary key, the new @@ -136,7 +133,6 @@ MB. - For conceptual information about modifying data, go to `Working with Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB Developer Guide . @@ -294,6 +290,11 @@ delete requests. Individual items to be written can be as large as 400 KB. + + BatchWriteItem cannot update items. To update items, use the + UpdateItem API. + + The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's @@ -433,10 +434,11 @@ DynamoDB sets the TableStatus to `ACTIVE`. You can perform read and write operations only on an `ACTIVE` table. - If you want to create multiple tables with secondary indexes - on them, you must create them sequentially. Only one table - with secondary indexes can be in the `CREATING` state at any - given time. + You can optionally define secondary indexes on the new table, + as part of the CreateTable operation. If you want to create + multiple tables with secondary indexes on them, you must + create the tables sequentially. 
Only one table with secondary + indexes can be in the `CREATING` state at any given time. You can use the DescribeTable API to check the table status. @@ -633,8 +635,8 @@ ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, - `a` is greater than `A`, and `aa` is greater than `B`. For a list - of code values, see + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when @@ -687,9 +689,19 @@ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + existence of an attribute, not its data type. If the data type of + attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the + result is a Boolean true . This result is because the attribute " + `a`" exists; its data type is not relevant to the `NOT_NULL` + comparison operator. + `NULL` : The attribute does not exist. `NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + nonexistence of an attribute, not its data type. If the data type + of attribute " `a`" is null, and you evaluate it using `NULL`, the + result is a Boolean false . This is because the attribute " `a`" + exists; its data type is not relevant to the `NULL` comparison + operator. + `CONTAINS` : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target @@ -760,7 +772,7 @@ the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false. - + Note that the default value for Exists is `True`. The Value and Exists parameters are incompatible with @@ -817,24 +829,25 @@ returned. :type condition_expression: string - :param condition_expression: - A condition that must be satisfied in order for a conditional - DeleteItem to succeed. - + :param condition_expression: A condition that must be satisfied in + order for a conditional DeleteItem to succeed. An expression can contain any of the following: - + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH` + + Boolean functions: `attribute_exists | attribute_not_exists | + contains | begins_with` These function names are case-sensitive. + Comparison operators: ` = | <> | < | > | <= | >= | BETWEEN | IN` - + Logical operators: `NOT | AND | OR` + + Logical operators: `AND | OR | NOT` - :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: + For more information on condition expressions, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. @@ -856,37 +869,39 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` - - :type expression_attribute_values: map - :param expression_attribute_values: - One or more values that can be substituted in an expression. + + `#name = "Smith" OR #name = "Jones"` - Use the **:** character in an expression to dereference an attribute - value. For example, consider the following expression: + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide .
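A hedged sketch of the corrected expression syntax for a conditional DeleteItem: a lower-case attribute_exists function, a "#" name token, and a ":" value token (value substitution is detailed just below). Table, key, and attribute names are illustrative:

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    conn.delete_item(
        'example-products',
        key={'Id': {'N': '123'}},
        condition_expression='attribute_exists(#st) AND #st = :disc',
        expression_attribute_names={'#st': 'ProductStatus'},
        expression_attribute_values={':disc': {'S': 'Discontinued'}})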
- + `ProductStatus IN ("Available","Backordered","Discontinued")` - - - Now suppose that you specified the following for - ExpressionAttributeValues : + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + `Available | Backordered | Discontinued` - + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"}, - "d":{"S":"Discontinued"} }` + You would first need to specify ExpressionAttributeValues as follows: + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` - The expression can now be simplified as follows: + You could then use these values in an expression, such as this: + `ProductStatus IN (:avail, :back, :disc)` - + `ProductStatus IN (:a,:b,:c)` + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . """ params = {'TableName': table_name, 'Key': key, } @@ -921,6 +936,12 @@ table is already in the `DELETING` state, no error is returned. + + DynamoDB might continue to accept data read and write + operations, such as GetItem and PutItem , on a table in the + `DELETING` state until the table deletion is complete. + + When you delete a table, any indexes on that table are also deleted. @@ -940,6 +961,14 @@ status of the table, when it was created, the primary key schema, and any indexes on the table. + + If you issue a DescribeTable request immediately after a + CreateTable request, DynamoDB might return a + ResourceNotFoundException. This is because DescribeTable uses + an eventually consistent query, and the metadata for your + table might not be available at that moment. Wait for a few + seconds, and then try the DescribeTable request again. + :type table_name: string :param table_name: The name of the table to describe. @@ -1006,20 +1035,21 @@ included in the response. :type projection_expression: string - :param projection_expression: One or more attributes to retrieve from - the table. These attributes can include scalars, sets, or elements - of a JSON document. The attributes in the expression must be - separated by commas. + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. - :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. 
@@ -1041,13 +1071,17 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . """ params = {'TableName': table_name, 'Key': key, } @@ -1120,6 +1154,12 @@ item (after the update). For more information, see the ReturnValues description below. + + To prevent a new item from replacing an existing item, use a + conditional put operation with ComparisonOperator set to + `NULL` for the primary key attribute, or attributes. + + For more information about using this API, see `Working with Items`_ in the Amazon DynamoDB Developer Guide . @@ -1179,8 +1219,8 @@ ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, - `a` is greater than `A`, and `aa` is greater than `B`. For a list - of code values, see + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when @@ -1233,9 +1273,19 @@ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + existence of an attribute, not its data type. If the data type of + attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the + result is a Boolean true . This result is because the attribute " + `a`" exists; its data type is not relevant to the `NOT_NULL` + comparison operator. + `NULL` : The attribute does not exist. `NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + nonexistence of an attribute, not its data type. If the data type + of attribute " `a`" is null, and you evaluate it using `NULL`, the + result is a Boolean false . This is because the attribute " `a`" + exists; its data type is not relevant to the `NULL` comparison + operator. + `CONTAINS` : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target @@ -1306,7 +1356,7 @@ the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false. - + Note that the default value for Exists is `True`. The Value and Exists parameters are incompatible with @@ -1364,24 +1414,25 @@ The operation will succeed only if the entire map evaluates to true. :type condition_expression: string - :param condition_expression: - A condition that must be satisfied in order for a conditional PutItem - operation to succeed. - + :param condition_expression: A condition that must be satisfied in + order for a conditional PutItem operation to succeed. An expression can contain any of the following: - + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH` + + Boolean functions: `attribute_exists | attribute_not_exists | + contains | begins_with` These function names are case-sensitive. + Comparison operators: ` = | <> | < | > | <= | >= | BETWEEN | IN` - + Logical operators: `NOT | AND | OR` + + Logical operators: `AND | OR | NOT` - :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: + For more information on condition expressions, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. @@ -1403,37 +1454,39 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` - - :type expression_attribute_values: map - :param expression_attribute_values: - One or more values that can be substituted in an expression. - - Use the **:** character in an expression to dereference an attribute - value.
For example, consider the following expression: + + `#name = "Smith" OR #name = "Jones"` - + `ProductStatus IN ("Available","Backordered","Discontinued")` + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: - Now suppose that you specified the following for - ExpressionAttributeValues : - + `Available | Backordered | Discontinued` - + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"}, - "d":{"S":"Discontinued"} }` + You would first need to specify ExpressionAttributeValues as follows: + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` - The expression can now be simplified as follows: + You could then use these values in an expression, such as this: + `ProductStatus IN (:avail, :back, :disc)` - + `ProductStatus IN (:a,:b,:c)` + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . """ params = {'TableName': table_name, 'Item': item, } @@ -1601,7 +1654,9 @@ query on a table, you can have conditions only on the table primary key attributes. You must specify the hash key attribute name and value as an `EQ` condition. You can optionally specify a second - condition, referring to the range key attribute. + condition, referring to the range key attribute. If you do not + specify a range key condition, all items under the hash key will be + fetched and processed. Any filters will be applied after this. For a query on an index, you can have conditions only on the index key attributes. You must specify the index hash attribute name and value as an EQ condition. You can optionally specify a second @@ -1616,8 +1671,8 @@ ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, - `a` is greater than `A`, and `aa` is greater than `B`. For a list - of code values, see + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when @@ -1687,9 +1742,10 @@ This parameter does not support lists or maps. - A condition that evaluates the query results and returns only the - desired values. - + A condition that evaluates the query results after the items are read + and returns only the desired values. + Query filters are applied after the items are read, so they do not + limit the capacity used. If you specify more than one condition in the QueryFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the @@ -1697,6 +1753,11 @@ do this, then at least one of the conditions must evaluate to true, rather than all of them.) + + QueryFilter does not allow key attributes. You cannot define a filter + condition on a hash key or range key.
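A sketch of the query behaviour just described, with invented table and attribute names: the EQ key condition bounds what is read, while the query filter only trims what is returned, so read capacity is consumed for the filtered-out items as well:

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    results = conn.query(
        'example-orders',
        key_conditions={'CustomerId': {
            'AttributeValueList': [{'S': 'alice'}],
            'ComparisonOperator': 'EQ'}},
        query_filter={'Total': {
            'AttributeValueList': [{'N': '100'}],
            'ComparisonOperator': 'GT'}})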
+ + Each QueryFilter element consists of an attribute name to compare, along with the following: @@ -1706,7 +1767,7 @@ operator specified in ComparisonOperator . For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For - example, `a` is greater than `A`, and `aa` is greater than `B`. For + example, `a` is greater than `A`, and `a` is greater than `B`. For a list of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For type Binary, DynamoDB treats each byte of the binary data as @@ -1723,11 +1784,6 @@ :type conditional_operator: string :param conditional_operator: - There is a newer parameter available. Use ConditionExpression instead. - Note that if you use ConditionalOperator and ConditionExpression at - the same time, DynamoDB will return a ValidationException - exception. - This parameter does not support lists or maps. A logical operator to apply to the conditions in the QueryFilter map: @@ -1769,26 +1825,32 @@ included in the response. :type projection_expression: string - :param projection_expression: One or more attributes to retrieve from - the table. These attributes can include scalars, sets, or elements - of a JSON document. The attributes in the expression must be - separated by commas. + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + :type filter_expression: string :param filter_expression: A condition that evaluates the query results - and returns only the desired values. + after the items are read and returns only the desired values. The condition you specify is applied to the items queried; any items that do not match the expression are not returned. + Filter expressions are applied after the items are read, so they do not + limit the capacity used. + A FilterExpression has the same syntax as a ConditionExpression . For + more information on expression syntax, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: - + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. @@ -1810,37 +1872,39 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` - - :type expression_attribute_values: map - :param expression_attribute_values: - One or more values that can be substituted in an expression. - - Use the **:** character in an expression to dereference an attribute - value. 
For example, consider the following expression: - + + `#name = "Smith" OR #name = "Jones"` - + `ProductStatus IN ("Available","Backordered","Discontinued")` + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . - Now suppose that you specified the following for - ExpressionAttributeValues : + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + `Available | Backordered | Discontinued` - + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"}, - "d":{"S":"Discontinued"} }` + You would first need to specify ExpressionAttributeValues as follows: + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` - The expression can now be simplified as follows: + You could then use these values in an expression, such as this: + `ProductStatus IN (:avail, :back, :disc)` - + `ProductStatus IN (:a,:b,:c)` + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . """ params = { @@ -1988,7 +2052,7 @@ operator specified in ComparisonOperator . For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For - example, `a` is greater than `A`, and `aa` is greater than `B`. For + example, `a` is greater than `A`, and `a` is greater than `B`. For a list of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For Binary, DynamoDB treats each byte of the binary data as @@ -2074,14 +2138,17 @@ If you specify Segment , you must also specify TotalSegments . :type projection_expression: string - :param projection_expression: One or more attributes to retrieve from - the table. These attributes can include scalars, sets, or elements - of a JSON document. The attributes in the expression must be - separated by commas. + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + :type filter_expression: string :param filter_expression: A condition that evaluates the scan results and returns only the desired values. @@ -2089,11 +2156,9 @@ that do not match the expression are not returned. :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: - + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. 
@@ -2115,37 +2180,39 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` - - :type expression_attribute_values: map - :param expression_attribute_values: - One or more values that can be substituted in an expression. - - Use the **:** character in an expression to dereference an attribute - value. For example, consider the following expression: - + + `#name = "Smith" OR #name = "Jones"` - + `ProductStatus IN ("Available","Backordered","Discontinued")` + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . - Now suppose that you specified the following for - ExpressionAttributeValues : + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + `Available | Backordered | Discontinued` - + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"}, - "d":{"S":"Discontinued"} }` + You would first need to specify ExpressionAttributeValues as follows: + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` - The expression can now be simplified as follows: + You could then use these values in an expression, such as this: + `ProductStatus IN (:avail, :back, :disc)` - + `ProductStatus IN (:a,:b,:c)` + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . """ params = {'TableName': table_name, } @@ -2255,7 +2322,17 @@ + If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing - attribute. + attribute. If you use `ADD` to increment or decrement a number + value for an item that doesn't exist before the update, DynamoDB + uses 0 as the initial value. Similarly, if you use `ADD` for an + existing item to increment or decrement an attribute value that + doesn't exist before the update, DynamoDB uses `0` as the initial + value. For example, suppose that the item you want to update + doesn't have an attribute named itemcount , but you decide to `ADD` + the number `3` to this attribute anyway. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute, with a + value of `3`. + If the existing data type is a set, and if Value is also a set, then Value is appended to the existing set. For example, if the attribute value is the set `[1,2]`, and the `ADD` action specified @@ -2271,8 +2348,10 @@ + `PUT` - Causes DynamoDB to create a new item with the specified primary key, and then adds the attribute. - + `DELETE` - Causes nothing to happen; there is no attribute to delete. - + `ADD` - Causes DynamoDB to creat an item with the supplied primary + + `DELETE` - Nothing happens, because attributes cannot be deleted from + a nonexistent item. The operation succeeds, but DynamoDB does not + create a new item. + + `ADD` - Causes DynamoDB to create an item with the supplied primary key and number (or set of numbers) for the attribute value. 
The only data types allowed are Number and Number Set. @@ -2317,8 +2396,8 @@ ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, - `a` is greater than `A`, and `aa` is greater than `B`. For a list - of code values, see + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when @@ -2371,9 +2450,19 @@ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + existence of an attribute, not its data type. If the data type of + attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the + result is a Boolean true . This result is because the attribute " + `a`" exists; its data type is not relevant to the `NOT_NULL` + comparison operator. + `NULL` : The attribute does not exist. `NULL` is supported for all - datatypes, including lists and maps. + datatypes, including lists and maps. This operator tests for the + nonexistence of an attribute, not its data type. If the data type + of attribute " `a`" is null, and you evaluate it using `NULL`, the + result is a Boolean false . This is because the attribute " `a`" + exists; its data type is not relevant to the `NULL` comparison + operator. + `CONTAINS` : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target @@ -2444,7 +2533,7 @@ the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false. - + Note that the default value for Exists is `True`. The Value and Exists parameters are incompatible with @@ -2508,10 +2597,9 @@ returned. :type update_expression: string - :param update_expression: - An expression that defines one or more attributes to be updated, the - action to be performed on them, and new value(s) for them. - + :param update_expression: An expression that defines one or more + attributes to be updated, the action to be performed on them, and + new value(s) for them. The following action values are available for UpdateExpression . @@ -2537,7 +2625,17 @@ + If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing - attribute. + attribute. If you use `ADD` to increment or decrement a number + value for an item that doesn't exist before the update, DynamoDB + uses `0` as the initial value. Similarly, if you use `ADD` for an + existing item to increment or decrement an attribute value that + doesn't exist before the update, DynamoDB uses `0` as the initial + value. For example, suppose that the item you want to update + doesn't have an attribute named itemcount , but you decide to `ADD` + the number `3` to this attribute anyway. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute in the + item, with a value of `3`. + If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set `[1,2]`, and the `ADD` action specified `[3]`, @@ -2563,33 +2661,29 @@ following: `SET a=:value1, b=:value2 DELETE :value3, :value4, :value5` - An expression can contain any of the following: - - - + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH` - + Comparison operators: ` = | <> | < | > | <= - | >= | BETWEEN | IN` - + Logical operators: `NOT | AND | OR` + For more information on update expressions, go to `Modifying Items and + Attributes`_ in the Amazon DynamoDB Developer Guide .
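The ADD-on-a-missing-attribute behaviour spelled out above, sketched with the newer update_expression form; the names are invented, and if no itemcount attribute exists yet, DynamoDB starts it at 0 before adding 3:

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    conn.update_item(
        'example-products',
        key={'Id': {'N': '123'}},
        update_expression='ADD itemcount :n',
        expression_attribute_values={':n': {'N': '3'}},
        return_values='UPDATED_NEW')  # expect itemcount == '3'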
:type condition_expression: string - :param condition_expression: - A condition that must be satisfied in order for a conditional update to - succeed. - + :param condition_expression: A condition that must be satisfied in + order for a conditional update to succeed. An expression can contain any of the following: - + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH` + + Boolean functions: `attribute_exists | attribute_not_exists | + contains | begins_with` These function names are case-sensitive. + Comparison operators: ` = | <> | < | > | <= | >= | BETWEEN | IN` - + Logical operators: `NOT | AND | OR` + + Logical operators: `AND | OR | NOT` - :type expression_attribute_names: map - :param expression_attribute_names: - One or more substitution tokens for simplifying complex expressions. - The following are some use cases for an ExpressionAttributeNames - value: + For more information on condition expressions, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + To shorten an attribute name that is very long or unwieldy in an expression. @@ -2611,37 +2705,39 @@ ExpressionAttributeNames : - + `{"n":"order.customerInfo.LastName"}` + + `{"#name":"order.customerInfo.LastName"}` The expression can now be simplified as follows: - + `#n = "Smith" OR #n = "Jones"` - - :type expression_attribute_values: map - :param expression_attribute_values: - One or more values that can be substituted in an expression. + + `#name = "Smith" OR #name = "Jones"` - Use the **:** character in an expression to dereference an attribute - value. For example, consider the following expression: + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . - + `ProductStatus IN ("Available","Backordered","Discontinued")` - - - Now suppose that you specified the following for - ExpressionAttributeValues : + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + `Available | Backordered | Discontinued` - + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"}, - "d":{"S":"Discontinued"} }` + You would first need to specify ExpressionAttributeValues as follows: + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` - The expression can now be simplified as follows: + You could then use these values in an expression, such as this: + `ProductStatus IN (:avail, :back, :disc)` - + `ProductStatus IN (:a,:b,:c)` + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . """ params = {'TableName': table_name, 'Key': key, } @@ -2669,28 +2765,35 @@ body=json.dumps(params)) def update_table(self, table_name, provisioned_throughput=None, - global_secondary_index_updates=None): + global_secondary_index_updates=None, + attribute_definitions=None): """ - Updates the provisioned throughput for the given table. 
- Setting the throughput for a table helps you manage - performance and is part of the provisioned throughput feature - of DynamoDB. - - The provisioned throughput values can be upgraded or - downgraded based on the maximums and minimums listed in the - `Limits`_ section in the Amazon DynamoDB Developer Guide . + Updates the provisioned throughput for the given table, or + manages the global secondary indexes on the table. + + You can increase or decrease the table's provisioned + throughput values within the maximums and minimums listed in + the `Limits`_ section in the Amazon DynamoDB Developer Guide . + + In addition, you can use UpdateTable to add, modify or delete + global secondary indexes on the table. For more information, + see `Managing Global Secondary Indexes`_ in the Amazon + DynamoDB Developer Guide . - The table must be in the `ACTIVE` state for this operation to + The table must be in the `ACTIVE` state for UpdateTable to succeed. UpdateTable is an asynchronous operation; while executing the operation, the table is in the `UPDATING` state. While the table is in the `UPDATING` state, the table still - has the provisioned throughput from before the call. The new - provisioned throughput setting is in effect only when the - table returns to the `ACTIVE` state after the UpdateTable - operation. + has the provisioned throughput from before the call. The + table's new provisioned throughput settings go into effect + when the table returns to the `ACTIVE` state; at that point, + the UpdateTable operation is complete. - You cannot add, modify or delete indexes using UpdateTable . - Indexes can only be defined at table creation time. + :type attribute_definitions: list + :param attribute_definitions: An array of attributes that describe the + key schema for the table and indexes. If you are adding a new + global secondary index to the table, AttributeDefinitions must + include the key element(s) of the new index. :type table_name: string :param table_name: The name of the table to be updated. @@ -2703,12 +2806,20 @@ `Limits`_ in the Amazon DynamoDB Developer Guide . :type global_secondary_index_updates: list - :param global_secondary_index_updates: An array of one or more global - secondary indexes on the table, together with provisioned - throughput settings for each index. + :param global_secondary_index_updates: + An array of one or more global secondary indexes for the table. For + each index in the array, you can specify one action: + + + + Create - add a new global secondary index to the table. + + Update - modify the provisioned throughput settings of an existing + global secondary index. + + Delete - remove a global secondary index from the table. 
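For illustration, a hedged sketch of one such action through the layer1 ``update_table`` call, reusing ``conn`` from the sketch above (table and index names hypothetical; a ``Create`` action would additionally need ``attribute_definitions`` covering the new index's key elements)::

    conn.update_table(
        'users',
        global_secondary_index_updates=[
            {'Update': {
                'IndexName': 'LastNameIndex',
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 5,
                },
            }},
        ])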
""" params = {'TableName': table_name, } + if attribute_definitions is not None: + params['AttributeDefinitions'] = attribute_definitions if provisioned_throughput is not None: params['ProvisionedThroughput'] = provisioned_throughput if global_secondary_index_updates is not None: diff -Nru python-boto-2.34.0/boto/dynamodb2/results.py python-boto-2.38.0/boto/dynamodb2/results.py --- python-boto-2.34.0/boto/dynamodb2/results.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb2/results.py 2015-04-09 18:57:51.000000000 +0000 @@ -180,10 +180,12 @@ kwargs['keys'] = self._keys_left[:self._max_batch_get] self._keys_left = self._keys_left[self._max_batch_get:] + if len(self._keys_left) <= 0: + self._results_left = False + results = self.the_callable(*args, **kwargs) if not len(results.get('results', [])): - self._results_left = False return self._results.extend(results['results']) @@ -194,8 +196,8 @@ # missing keys ever making it here. self._keys_left.insert(offset, key_data) - if len(self._keys_left) <= 0: - self._results_left = False + if len(self._keys_left) > 0: + self._results_left = True # Decrease the limit, if it's present. if self.call_kwargs.get('limit'): diff -Nru python-boto-2.34.0/boto/dynamodb2/table.py python-boto-2.38.0/boto/dynamodb2/table.py --- python-boto-2.34.0/boto/dynamodb2/table.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb2/table.py 2015-04-09 18:57:51.000000000 +0000 @@ -7,8 +7,8 @@ from boto.dynamodb2.items import Item from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet -from boto.dynamodb2.types import (Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS, - STRING) +from boto.dynamodb2.types import (NonBooleanDynamizer, Dynamizer, FILTER_OPERATORS, + QUERY_OPERATORS, STRING) from boto.exception import JSONResponseError @@ -24,6 +24,18 @@ """ max_batch_get = 100 + _PROJECTION_TYPE_TO_INDEX = dict( + global_indexes=dict( + ALL=GlobalAllIndex, + KEYS_ONLY=GlobalKeysOnlyIndex, + INCLUDE=GlobalIncludeIndex, + ), local_indexes=dict( + ALL=AllIndex, + KEYS_ONLY=KeysOnlyIndex, + INCLUDE=IncludeIndex, + ) + ) + def __init__(self, table_name, schema=None, throughput=None, indexes=None, global_indexes=None, connection=None): """ @@ -109,6 +121,9 @@ if throughput is not None: self.throughput = throughput + self._dynamizer = NonBooleanDynamizer() + + def use_boolean(self): self._dynamizer = Dynamizer() @classmethod @@ -122,9 +137,9 @@ to define the key structure of the table. **IMPORTANT** - You should consider the usage pattern of your table - up-front, as the schema & indexes can **NOT** be modified once the - table is created, requiring the creation of a new table & migrating - the data should you wish to revise it. + up-front, as the schema can **NOT** be modified once the table is + created, requiring the creation of a new table & migrating the data + should you wish to revise it. **IMPORTANT** - If the table already exists in DynamoDB, additional calls to this method will result in an error. If you just need @@ -264,25 +279,25 @@ return schema - def _introspect_indexes(self, raw_indexes): + def _introspect_all_indexes(self, raw_indexes, map_indexes_projection): """ - Given a raw index structure back from a DynamoDB response, parse - out & build the high-level Python objects that represent them. + Given a raw index/global index structure back from a DynamoDB response, + parse out & build the high-level Python objects that represent them. 
""" indexes = [] for field in raw_indexes: - index_klass = AllIndex + index_klass = map_indexes_projection.get('ALL') kwargs = { 'parts': [] } if field['Projection']['ProjectionType'] == 'ALL': - index_klass = AllIndex + index_klass = map_indexes_projection.get('ALL') elif field['Projection']['ProjectionType'] == 'KEYS_ONLY': - index_klass = KeysOnlyIndex + index_klass = map_indexes_projection.get('KEYS_ONLY') elif field['Projection']['ProjectionType'] == 'INCLUDE': - index_klass = IncludeIndex + index_klass = map_indexes_projection.get('INCLUDE') kwargs['includes'] = field['Projection']['NonKeyAttributes'] else: raise exceptions.UnknownIndexFieldError( @@ -297,16 +312,33 @@ return indexes + def _introspect_indexes(self, raw_indexes): + """ + Given a raw index structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. + """ + return self._introspect_all_indexes( + raw_indexes, self._PROJECTION_TYPE_TO_INDEX.get('local_indexes')) + + def _introspect_global_indexes(self, raw_global_indexes): + """ + Given a raw global index structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. + """ + return self._introspect_all_indexes( + raw_global_indexes, + self._PROJECTION_TYPE_TO_INDEX.get('global_indexes')) + def describe(self): """ Describes the current structure of the table in DynamoDB. - This information will be used to update the ``schema``, ``indexes`` - and ``throughput`` information on the ``Table``. Some calls, such as - those involving creating keys or querying, will require this - information to be populated. + This information will be used to update the ``schema``, ``indexes``, + ``global_indexes`` and ``throughput`` information on the ``Table``. Some + calls, such as those involving creating keys or querying, will require + this information to be populated. - It also returns the full raw datastructure from DynamoDB, in the + It also returns the full raw data structure from DynamoDB, in the event you'd like to parse out additional information (such as the ``ItemCount`` or usage information). @@ -339,20 +371,27 @@ raw_indexes = result['Table'].get('LocalSecondaryIndexes', []) self.indexes = self._introspect_indexes(raw_indexes) + # Build the global index information as well. + raw_global_indexes = result['Table'].get('GlobalSecondaryIndexes', []) + self.global_indexes = self._introspect_global_indexes(raw_global_indexes) + # This is leaky. return result - def update(self, throughput, global_indexes=None): + def update(self, throughput=None, global_indexes=None): """ - Updates table attributes in DynamoDB. - - Currently, the only thing you can modify about a table after it has - been created is the throughput. + Updates table attributes and global indexes in DynamoDB. - Requires a ``throughput`` parameter, which should be a + Optionally accepts a ``throughput`` parameter, which should be a dictionary. If provided, it should specify a ``read`` & ``write`` key, both of which should have an integer value associated with them. + Optionally accepts a ``global_indexes`` parameter, which should be a + dictionary. If provided, it should specify the index name, which is also + a dict containing a ``read`` & ``write`` key, both of which + should have an integer value associated with them. If you are writing + new code, please use ``Table.update_global_secondary_index``. + Returns ``True`` on success. Example:: @@ -376,13 +415,17 @@ ... } ... 
}) True - """ - self.throughput = throughput - data = { - 'ReadCapacityUnits': int(self.throughput['read']), - 'WriteCapacityUnits': int(self.throughput['write']), - } + + data = None + + if throughput: + self.throughput = throughput + data = { + 'ReadCapacityUnits': int(self.throughput['read']), + 'WriteCapacityUnits': int(self.throughput['write']), + } + gsi_data = None if global_indexes: @@ -399,12 +442,170 @@ }, }) - self.connection.update_table( - self.table_name, - provisioned_throughput=data, - global_secondary_index_updates=gsi_data - ) - return True + if throughput or global_indexes: + self.connection.update_table( + self.table_name, + provisioned_throughput=data, + global_secondary_index_updates=gsi_data, + ) + + return True + else: + msg = 'You need to provide either the throughput or the ' \ + 'global_indexes to update method' + boto.log.error(msg) + + return False + + def create_global_secondary_index(self, global_index): + """ + Creates a global index in DynamoDB after the table has been created. + + Requires a ``global_indexes`` parameter, which should be a + ``GlobalBaseIndexField`` subclass representing the desired index. + + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``. + + Returns ``True`` on success. + + Example:: + + # To create a global index + >>> users.create_global_secondary_index( + ... global_index=GlobalAllIndex( + ... 'TheIndexNameHere', parts=[ + ... HashKey('requiredHashkey', data_type=STRING), + ... RangeKey('optionalRangeKey', data_type=STRING) + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + ... ) + True + + """ + + if global_index: + gsi_data = [] + gsi_data_attr_def = [] + + gsi_data.append({ + "Create": global_index.schema() + }) + + for attr_def in global_index.parts: + gsi_data_attr_def.append(attr_def.definition()) + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + attribute_definitions=gsi_data_attr_def + ) + + return True + else: + msg = 'You need to provide the global_index to ' \ + 'create_global_secondary_index method' + boto.log.error(msg) + + return False + + def delete_global_secondary_index(self, global_index_name): + """ + Deletes a global index in DynamoDB after the table has been created. + + Requires a ``global_index_name`` parameter, which should be a simple + string of the name of the global secondary index. + + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``. + + Returns ``True`` on success. + + Example:: + + # To delete a global index + >>> users.delete_global_secondary_index('TheIndexNameHere') + True + + """ + + if global_index_name: + gsi_data = [ + { + "Delete": { + "IndexName": global_index_name + } + } + ] + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + ) + + return True + else: + msg = 'You need to provide the global index name to ' \ + 'delete_global_secondary_index method' + boto.log.error(msg) + + return False + + def update_global_secondary_index(self, global_indexes): + """ + Updates a global index(es) in DynamoDB after the table has been created. + + Requires a ``global_indexes`` parameter, which should be a + dictionary. If provided, it should specify the index name, which is also + a dict containing a ``read`` & ``write`` key, both of which + should have an integer value associated with them. 
+ + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``. + + Returns ``True`` on success. + + Example:: + + # To update a global index + >>> users.update_global_secondary_index(global_indexes={ + ... 'TheIndexNameHere': { + ... 'read': 15, + ... 'write': 5, + ... } + ... }) + True + + """ + + if global_indexes: + gsi_data = [] + + for gsi_name, gsi_throughput in global_indexes.items(): + gsi_data.append({ + "Update": { + "IndexName": gsi_name, + "ProvisionedThroughput": { + "ReadCapacityUnits": int(gsi_throughput['read']), + "WriteCapacityUnits": int(gsi_throughput['write']), + }, + }, + }) + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + ) + return True + else: + msg = 'You need to provide the global indexes to ' \ + 'update_global_secondary_index method' + boto.log.error(msg) + + return False def delete(self): """ @@ -1003,7 +1204,7 @@ def query_count(self, index=None, consistent=False, conditional_operator=None, query_filter=None, scan_index_forward=True, limit=None, - **filter_kwargs): + exclusive_start_key=None, **filter_kwargs): """ Queries the exact count of matching items in a DynamoDB table. @@ -1034,6 +1235,9 @@ + `AND` - True if all filter conditions evaluate to true (default) + `OR` - True if at least one filter condition evaluates to true + Optionally accept a ``exclusive_start_key`` which is used to get + the remaining items when a query cannot return the complete count. + Returns an integer which represents the exact amount of matched items. @@ -1079,18 +1283,29 @@ using=FILTER_OPERATORS ) - raw_results = self.connection.query( - self.table_name, - index_name=index, - consistent_read=consistent, - select='COUNT', - key_conditions=key_conditions, - query_filter=built_query_filter, - conditional_operator=conditional_operator, - limit=limit, - scan_index_forward=scan_index_forward, - ) - return int(raw_results.get('Count', 0)) + count_buffer = 0 + last_evaluated_key = exclusive_start_key + + while True: + raw_results = self.connection.query( + self.table_name, + index_name=index, + consistent_read=consistent, + select='COUNT', + key_conditions=key_conditions, + query_filter=built_query_filter, + conditional_operator=conditional_operator, + limit=limit, + scan_index_forward=scan_index_forward, + exclusive_start_key=last_evaluated_key + ) + + count_buffer += int(raw_results.get('Count', 0)) + last_evaluated_key = raw_results.get('LastEvaluatedKey') + if not last_evaluated_key or count_buffer < 1: + break + + return count_buffer def _query(self, limit=None, index=None, reverse=False, consistent=False, exclusive_start_key=None, select=None, attributes_to_get=None, diff -Nru python-boto-2.34.0/boto/dynamodb2/types.py python-boto-2.38.0/boto/dynamodb2/types.py --- python-boto-2.34.0/boto/dynamodb2/types.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/dynamodb2/types.py 2015-04-09 18:57:51.000000000 +0000 @@ -1,7 +1,7 @@ # Shadow the DynamoDB v1 bits. # This way, no end user should have to cross-import between versions & we # reserve the namespace to extend v2 if it's ever needed. -from boto.dynamodb.types import Dynamizer +from boto.dynamodb.types import NonBooleanDynamizer, Dynamizer # Some constants for our use. 
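The types module now re-exports ``NonBooleanDynamizer`` because the high-level ``Table`` (see the table.py hunk above) defaults to it; native ``BOOL`` handling is opt-in via the new ``use_boolean`` method. A minimal sketch, assuming a hypothetical ``users`` table::

    from boto.dynamodb2.table import Table

    users = Table('users')
    # Opt in to round-tripping Python booleans as the native BOOL type
    # rather than the legacy numeric encoding.
    users.use_boolean()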
@@ -11,6 +11,10 @@ STRING_SET = 'SS' NUMBER_SET = 'NS' BINARY_SET = 'BS' +NULL = 'NULL' +BOOLEAN = 'BOOL' +MAP = 'M' +LIST = 'L' QUERY_OPERATORS = { 'eq': 'EQ', diff -Nru python-boto-2.34.0/boto/ec2/autoscale/__init__.py python-boto-2.38.0/boto/ec2/autoscale/__init__.py --- python-boto-2.34.0/boto/ec2/autoscale/__init__.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/autoscale/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -192,6 +192,27 @@ self.build_list_params(params, instance_ids, 'InstanceIds') return self.get_status('AttachInstances', params) + def detach_instances(self, name, instance_ids, decrement_capacity=True): + """ + Detach instances from an Auto Scaling group. + + :type name: str + :param name: The name of the Auto Scaling group from which to detach instances. + + :type instance_ids: list + :param instance_ids: Instance ids to be detached from the Auto Scaling group. + + :type decrement_capacity: bool + :param decrement_capacity: Whether to decrement the size of the + Auto Scaling group or not. + """ + + params = {'AutoScalingGroupName': name} + params['ShouldDecrementDesiredCapacity'] = 'true' if decrement_capacity else 'false' + + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('DetachInstances', params) + def create_auto_scaling_group(self, as_group): """ Create auto scaling group. @@ -259,6 +280,14 @@ params['DeleteOnTermination'] = 'false' if launch_config.iops: params['Iops'] = launch_config.iops + if launch_config.classic_link_vpc_id: + params['ClassicLinkVPCId'] = launch_config.classic_link_vpc_id + if launch_config.classic_link_vpc_security_groups: + self.build_list_params( + params, + launch_config.classic_link_vpc_security_groups, + 'ClassicLinkVPCSecurityGroups' + ) return self.get_object('CreateLaunchConfiguration', params, Request, verb='POST') diff -Nru python-boto-2.34.0/boto/ec2/autoscale/launchconfig.py python-boto-2.38.0/boto/ec2/autoscale/launchconfig.py --- python-boto-2.34.0/boto/ec2/autoscale/launchconfig.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/autoscale/launchconfig.py 2015-04-09 18:57:51.000000000 +0000 @@ -103,7 +103,9 @@ instance_monitoring=False, spot_price=None, instance_profile_name=None, ebs_optimized=False, associate_public_ip_address=None, volume_type=None, - delete_on_termination=True, iops=None, use_block_device_types=False): + delete_on_termination=True, iops=None, + use_block_device_types=False, classic_link_vpc_id=None, + classic_link_vpc_security_groups=None): """ A launch configuration. @@ -159,6 +161,14 @@ :type associate_public_ip_address: bool :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. + + :type classic_link_vpc_id: str + :param classic_link_vpc_id: ID of ClassicLink enabled VPC. + + :type classic_link_vpc_security_groups: list + :param classic_link_vpc_security_groups: Security group + id's of the security groups with which to associate the + ClassicLink VPC instances. 
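A hedged sketch of the new ClassicLink support in launch configurations, together with the ``detach_instances`` call added above (all names and IDs hypothetical)::

    from boto.ec2.autoscale import AutoScaleConnection
    from boto.ec2.autoscale.launchconfig import LaunchConfiguration

    conn = AutoScaleConnection()
    lc = LaunchConfiguration(
        name='my-launch-config',
        image_id='ami-12345678',
        instance_type='m3.medium',
        classic_link_vpc_id='vpc-12345678',
        classic_link_vpc_security_groups=['sg-12345678'])
    conn.create_launch_configuration(lc)
    # The new detach call works off the group name (also hypothetical).
    conn.detach_instances('my-asg', ['i-12345678'], decrement_capacity=True)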
""" self.connection = connection self.name = name @@ -183,6 +193,10 @@ self.delete_on_termination = delete_on_termination self.iops = iops self.use_block_device_types = use_block_device_types + self.classic_link_vpc_id = classic_link_vpc_id + classic_link_vpc_sec_groups = classic_link_vpc_security_groups or [] + self.classic_link_vpc_security_groups = \ + ListElement(classic_link_vpc_sec_groups) if connection is not None: self.use_block_device_types = connection.use_block_device_types @@ -193,6 +207,8 @@ def startElement(self, name, attrs, connection): if name == 'SecurityGroups': return self.security_groups + elif name == 'ClassicLinkVPCSecurityGroups': + return self.classic_link_vpc_security_groups elif name == 'BlockDeviceMappings': if self.use_block_device_types: self.block_device_mappings = BDM() @@ -244,6 +260,8 @@ self.delete_on_termination = False elif name == 'Iops': self.iops = int(value) + elif name == 'ClassicLinkVPCId': + self.classic_link_vpc_id = value else: setattr(self, name, value) diff -Nru python-boto-2.34.0/boto/ec2/connection.py python-boto-2.38.0/boto/ec2/connection.py --- python-boto-2.34.0/boto/ec2/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -72,7 +72,7 @@ class EC2Connection(AWSQueryConnection): - APIVersion = boto.config.get('Boto', 'ec2_version', '2014-05-01') + APIVersion = boto.config.get('Boto', 'ec2_version', '2014-10-01') DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', 'ec2.us-east-1.amazonaws.com') @@ -104,9 +104,8 @@ if api_version: self.APIVersion = api_version - @detect_potential_sigv4 def _required_auth_capability(self): - return ['ec2'] + return ['hmac-v4'] def get_params(self): """ @@ -3786,6 +3785,8 @@ params[prefix + 'Platform'] = tc.platform if tc.instance_count is not None: params[prefix + 'InstanceCount'] = tc.instance_count + if tc.instance_type is not None: + params[prefix + 'InstanceType'] = tc.instance_type def modify_reserved_instances(self, client_token, reserved_instance_ids, target_configurations): @@ -4459,3 +4460,47 @@ if dry_run: params['DryRun'] = 'true' return self.get_status('ModifyVpcAttribute', params, verb='POST') + + def get_all_classic_link_instances(self, instance_ids=None, filters=None, + dry_run=False, max_results=None, + next_token=None): + """ + Get all of your linked EC2-Classic instances. This request only + returns information about EC2-Classic instances linked to + a VPC through ClassicLink + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs. Must be + instances linked to a VPC through ClassicLink. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. 
+ + :rtype: list + :return: A list of :class:`boto.ec2.instance.Instance` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeClassicLinkInstances', params, + [('item', Instance)], verb='POST') diff -Nru python-boto-2.34.0/boto/ec2/elb/__init__.py python-boto-2.38.0/boto/ec2/elb/__init__.py --- python-boto-2.34.0/boto/ec2/elb/__init__.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/elb/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -98,7 +98,7 @@ profile_name=profile_name) def _required_auth_capability(self): - return ['ec2'] + return ['hmac-v4'] def build_list_params(self, params, items, label): if isinstance(items, six.string_types): @@ -695,9 +695,9 @@ def apply_security_groups_to_lb(self, name, security_groups): """ - Applies security groups to the load balancer. - Applying security groups that are already registered with the - Load Balancer has no effect. + Associates one or more security groups with the load balancer. + The provided security groups will override any currently applied + security groups. :type name: string :param name: The name of the Load Balancer diff -Nru python-boto-2.34.0/boto/ec2/elb/loadbalancer.py python-boto-2.38.0/boto/ec2/elb/loadbalancer.py --- python-boto-2.34.0/boto/ec2/elb/loadbalancer.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/elb/loadbalancer.py 2015-04-09 18:57:51.000000000 +0000 @@ -404,9 +404,9 @@ def apply_security_groups(self, security_groups): """ - Applies security groups to the load balancer. - Applying security groups that are already registered with the - Load Balancer has no effect. + Associates one or more security groups with the load balancer. + The provided security groups will override any currently applied + security groups. :type security_groups: string or List of strings :param security_groups: The name of the security group(s) to add. diff -Nru python-boto-2.34.0/boto/ec2/reservedinstance.py python-boto-2.38.0/boto/ec2/reservedinstance.py --- python-boto-2.34.0/boto/ec2/reservedinstance.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/reservedinstance.py 2015-04-09 18:57:51.000000000 +0000 @@ -134,6 +134,7 @@ self.instance_count = instance_count self.state = state self.start = None + self.end = None def __repr__(self): return 'ReservedInstance:%s' % self.id @@ -147,6 +148,8 @@ self.state = value elif name == 'start': self.start = value + elif name == 'end': + self.end = value else: super(ReservedInstance, self).endElement(name, value, connection) diff -Nru python-boto-2.34.0/boto/ec2/snapshot.py python-boto-2.38.0/boto/ec2/snapshot.py --- python-boto-2.34.0/boto/ec2/snapshot.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ec2/snapshot.py 2015-04-09 18:57:51.000000000 +0000 @@ -141,7 +141,7 @@ :type volume_type: string :param volume_type: The type of the volume. (optional). Valid - values are: standard | io1. + values are: standard | io1 | gp2. 
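A hedged sketch of restoring a snapshot onto a General Purpose (SSD) volume using the newly documented ``gp2`` type (region, snapshot ID, and zone hypothetical)::

    from boto.ec2 import connect_to_region

    conn = connect_to_region('us-east-1')
    snap = conn.get_all_snapshots(snapshot_ids=['snap-12345678'])[0]
    vol = snap.create_volume('us-east-1a', volume_type='gp2')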
:type iops: int :param iops: The provisioned IOPs you want to associate with diff -Nru python-boto-2.34.0/boto/ec2containerservice/exceptions.py python-boto-2.38.0/boto/ec2containerservice/exceptions.py --- python-boto-2.34.0/boto/ec2containerservice/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/ec2containerservice/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,31 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class ServerException(BotoServerError): + pass + + +class ClientException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/ec2containerservice/__init__.py python-boto-2.38.0/boto/ec2containerservice/__init__.py --- python-boto-2.34.0/boto/ec2containerservice/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/ec2containerservice/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon EC2 Container Service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.ec2containerservice import EC2ContainerServiceConnection + return get_regions('', connection_cls=EC2ContainerServiceConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/ec2containerservice/layer1.py python-boto-2.38.0/boto/ec2containerservice/layer1.py --- python-boto-2.34.0/boto/ec2containerservice/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/ec2containerservice/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,748 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.ec2containerservice import exceptions + + +class EC2ContainerServiceConnection(AWSQueryConnection): + """ + Amazon EC2 Container Service (Amazon ECS) is a highly scalable, + fast, container management service that makes it easy to run, + stop, and manage Docker containers on a cluster of Amazon EC2 + instances. Amazon ECS lets you launch and stop container-enabled + applications with simple API calls, allows you to get the state of + your cluster from a centralized service, and gives you access to + many familiar Amazon EC2 features like security groups, Amazon EBS + volumes, and IAM roles. + + You can use Amazon ECS to schedule the placement of containers + across your cluster based on your resource needs, isolation + policies, and availability requirements. Amazon EC2 Container + Service eliminates the need for you to operate your own cluster + management and configuration management systems or worry about + scaling your management infrastructure. 
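A hedged first-contact sketch for the new service (the cluster name is hypothetical, and the ``<Action>Response``/``<Action>Result`` nesting assumes boto's usual Query-to-JSON response wrapping)::

    import boto.ec2containerservice

    conn = boto.ec2containerservice.connect_to_region('us-east-1')
    conn.create_cluster('demo-cluster')
    described = conn.describe_clusters(['demo-cluster'])
    # Assumed response shape; adjust if the service returns otherwise.
    clusters = described['DescribeClustersResponse'][
        'DescribeClustersResult']['clusters']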
+ """ + APIVersion = "2014-11-13" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "ServerException": exceptions.ServerException, + "ClientException": exceptions.ClientException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(EC2ContainerServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_cluster(self, cluster_name=None): + """ + Creates a new Amazon ECS cluster. By default, your account + will receive a `default` cluster when you launch your first + container instance. However, you can create your own cluster + with a unique name with the `CreateCluster` action. + + During the preview, each account is limited to two clusters. + + :type cluster_name: string + :param cluster_name: The name of your cluster. If you do not specify a + name for your cluster, you will create a cluster named `default`. + + """ + params = {} + if cluster_name is not None: + params['clusterName'] = cluster_name + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster): + """ + Deletes the specified cluster. You must deregister all + container instances from this cluster before you may delete + it. You can list the container instances in a cluster with + ListContainerInstances and deregister them with + DeregisterContainerInstance. + + :type cluster: string + :param cluster: The cluster you want to delete. + + """ + params = {'cluster': cluster, } + return self._make_request( + action='DeleteCluster', + verb='POST', + path='/', params=params) + + def deregister_container_instance(self, container_instance, cluster=None, + force=None): + """ + Deregisters an Amazon ECS container instance from the + specified cluster. This instance will no longer be available + to run tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instance you want to + deregister. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance you want to + deregister. The ARN contains the `arn:aws:ecs` namespace, followed + by the region of the container instance, the AWS account ID of the + container instance owner, the `container-instance` namespace, and + then the container instance UUID. For example, arn:aws:ecs: region + : aws_account_id :container-instance/ container_instance_UUID . + + :type force: boolean + :param force: Force the deregistration of the container instance. You + can use the `force` parameter if you have several tasks running on + a container instance and you don't want to run `StopTask` for each + task before deregistering the container instance. 
+ + """ + params = {'containerInstance': container_instance, } + if cluster is not None: + params['cluster'] = cluster + if force is not None: + params['force'] = str( + force).lower() + return self._make_request( + action='DeregisterContainerInstance', + verb='POST', + path='/', params=params) + + def deregister_task_definition(self, task_definition): + """ + Deregisters the specified task definition. You will no longer + be able to run tasks from this definition after + deregistration. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to deregister. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DeregisterTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_clusters(self, clusters=None): + """ + Describes one or more of your clusters. + + :type clusters: list + :param clusters: A space-separated list of cluster names or full + cluster Amazon Resource Name (ARN) entries. If you do not specify a + cluster, the default cluster is assumed. + + """ + params = {} + if clusters is not None: + self.build_list_params(params, + clusters, + 'clusters.member') + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_container_instances(self, container_instances, cluster=None): + """ + Describes Amazon EC2 Container Service container instances. + Returns metadata about registered and remaining resources on + each container instance requested. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instances you want to + describe. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instances: list + :param container_instances: A space-separated list of container + instance UUIDs or full Amazon Resource Name (ARN) entries. + + """ + params = {} + self.build_list_params(params, + container_instances, + 'containerInstances.member') + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='DescribeContainerInstances', + verb='POST', + path='/', params=params) + + def describe_task_definition(self, task_definition): + """ + Describes a task definition. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to describe. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DescribeTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_tasks(self, tasks, cluster=None): + """ + Describes a specified task or tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task you want to describe. If you do not + specify a cluster, the default cluster is assumed. + + :type tasks: list + :param tasks: A space-separated list of task UUIDs or full Amazon + Resource Name (ARN) entries. 
+ + """ + params = {} + self.build_list_params(params, + tasks, + 'tasks.member') + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='DescribeTasks', + verb='POST', + path='/', params=params) + + def discover_poll_endpoint(self, container_instance=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Returns an endpoint for the Amazon EC2 Container Service agent + to poll for updates. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance. The ARN contains the + `arn:aws:ecs` namespace, followed by the region of the container + instance, the AWS account ID of the container instance owner, the + `container-instance` namespace, and then the container instance + UUID. For example, arn:aws:ecs: region : aws_account_id :container- + instance/ container_instance_UUID . + + """ + params = {} + if container_instance is not None: + params['containerInstance'] = container_instance + return self._make_request( + action='DiscoverPollEndpoint', + verb='POST', + path='/', params=params) + + def list_clusters(self, next_token=None, max_results=None): + """ + Returns a list of existing clusters. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListClusters` request where `maxResults` was used and + the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the + `nextToken` value. This value is `null` when there are no more + results to return. + + :type max_results: integer + :param max_results: The maximum number of cluster results returned by + `ListClusters` in paginated output. When this parameter is used, + `ListClusters` only returns `maxResults` results in a single page + along with a `nextToken` response element. The remaining results of + the initial request can be seen by sending another `ListClusters` + request with the returned `nextToken` value. This value can be + between 1 and 100. If this parameter is not used, then + `ListClusters` returns up to 100 results and a `nextToken` value if + applicable. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListClusters', + verb='POST', + path='/', params=params) + + def list_container_instances(self, cluster=None, next_token=None, + max_results=None): + """ + Returns a list of container instances in a specified cluster. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instances you want to list. If + you do not specify a cluster, the default cluster is assumed.. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListContainerInstances` request where `maxResults` was + used and the results exceeded the value of that parameter. + Pagination continues from the end of the previous results that + returned the `nextToken` value. This value is `null` when there are + no more results to return. + + :type max_results: integer + :param max_results: The maximum number of container instance results + returned by `ListContainerInstances` in paginated output. 
When this + parameter is used, `ListContainerInstances` only returns + `maxResults` results in a single page along with a `nextToken` + response element. The remaining results of the initial request can + be seen by sending another `ListContainerInstances` request with + the returned `nextToken` value. This value can be between 1 and + 100. If this parameter is not used, then `ListContainerInstances` + returns up to 100 results and a `nextToken` value if applicable. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListContainerInstances', + verb='POST', + path='/', params=params) + + def list_task_definitions(self, family_prefix=None, next_token=None, + max_results=None): + """ + Returns a list of task definitions that are registered to your + account. You can filter the results by family name with the + `familyPrefix` parameter. + + :type family_prefix: string + :param family_prefix: The name of the family that you want to filter + the `ListTaskDefinitions` results with. Specifying a `familyPrefix` + will limit the listed task definitions to definitions that belong + to that family. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListTaskDefinitions` request where `maxResults` was used + and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the + `nextToken` value. This value is `null` when there are no more + results to return. + + :type max_results: integer + :param max_results: The maximum number of task definition results + returned by `ListTaskDefinitions` in paginated output. When this + parameter is used, `ListTaskDefinitions` only returns `maxResults` + results in a single page along with a `nextToken` response element. + The remaining results of the initial request can be seen by sending + another `ListTaskDefinitions` request with the returned `nextToken` + value. This value can be between 1 and 100. If this parameter is + not used, then `ListTaskDefinitions` returns up to 100 results and + a `nextToken` value if applicable. + + """ + params = {} + if family_prefix is not None: + params['familyPrefix'] = family_prefix + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListTaskDefinitions', + verb='POST', + path='/', params=params) + + def list_tasks(self, cluster=None, container_instance=None, family=None, + next_token=None, max_results=None): + """ + Returns a list of tasks for a specified cluster. You can + filter the results by family name or by a particular container + instance with the `family` and `containerInstance` parameters. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the tasks you want to list. If you do not + specify a cluster, the default cluster is assumed.. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance that you want to + filter the `ListTasks` results with. Specifying a + `containerInstance` will limit the results to tasks that belong to + that container instance. 
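The list calls share the ``nextToken``/``maxResults`` pagination contract described above. A hedged sketch of draining one of them, where the response-key nesting and the ``clusterArns`` field are assumptions::

    cluster_arns = []
    next_token = None
    while True:
        page = conn.list_clusters(next_token=next_token, max_results=100)
        result = page['ListClustersResponse']['ListClustersResult']
        cluster_arns.extend(result.get('clusterArns', []))
        next_token = result.get('nextToken')
        if not next_token:
            break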
+ + :type family: string + :param family: The name of the family that you want to filter the + `ListTasks` results with. Specifying a `family` will limit the + results to tasks that belong to that family. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListTasks` request where `maxResults` was used and the + results exceeded the value of that parameter. Pagination continues + from the end of the previous results that returned the `nextToken` + value. This value is `null` when there are no more results to + return. + + :type max_results: integer + :param max_results: The maximum number of task results returned by + `ListTasks` in paginated output. When this parameter is used, + `ListTasks` only returns `maxResults` results in a single page + along with a `nextToken` response element. The remaining results of + the initial request can be seen by sending another `ListTasks` + request with the returned `nextToken` value. This value can be + between 1 and 100. If this parameter is not used, then `ListTasks` + returns up to 100 results and a `nextToken` value if applicable. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if container_instance is not None: + params['containerInstance'] = container_instance + if family is not None: + params['family'] = family + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListTasks', + verb='POST', + path='/', params=params) + + def register_container_instance(self, cluster=None, + instance_identity_document=None, + instance_identity_document_signature=None, + total_resources=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Registers an Amazon EC2 instance into the specified cluster. + This instance will become available to place containers on. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that you want to register your container instance with. + If you do not specify a cluster, the default cluster is assumed.. + + :type instance_identity_document: string + :param instance_identity_document: + + :type instance_identity_document_signature: string + :param instance_identity_document_signature: + + :type total_resources: list + :param total_resources: + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if instance_identity_document is not None: + params['instanceIdentityDocument'] = instance_identity_document + if instance_identity_document_signature is not None: + params['instanceIdentityDocumentSignature'] = instance_identity_document_signature + if total_resources is not None: + self.build_complex_list_params( + params, total_resources, + 'totalResources.member', + ('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue')) + return self._make_request( + action='RegisterContainerInstance', + verb='POST', + path='/', params=params) + + def register_task_definition(self, family, container_definitions): + """ + Registers a new task definition from the supplied `family` and + `containerDefinitions`. + + :type family: string + :param family: You can specify a `family` for a task definition, which + allows you to track multiple versions of the same task definition. + You can think of the `family` as a name for your task definition. 
+ + :type container_definitions: list + :param container_definitions: A list of container definitions in JSON + format that describe the different containers that make up your + task. + + """ + params = {'family': family, } + self.build_complex_list_params( + params, container_definitions, + 'containerDefinitions.member', + ('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment')) + return self._make_request( + action='RegisterTaskDefinition', + verb='POST', + path='/', params=params) + + def run_task(self, task_definition, cluster=None, overrides=None, + count=None): + """ + Start a task using random placement and the default Amazon ECS + scheduler. If you want to use your own scheduler or place a + task on a specific container instance, use `StartTask` + instead. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that you want to run your task on. If you do not + specify a cluster, the default cluster is assumed.. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to run. + + :type overrides: dict + :param overrides: + + :type count: integer + :param count: The number of instances of the specified task that you + would like to place on your cluster. + + """ + params = {'taskDefinition': task_definition, } + if cluster is not None: + params['cluster'] = cluster + if overrides is not None: + params['overrides'] = overrides + if count is not None: + params['count'] = count + return self._make_request( + action='RunTask', + verb='POST', + path='/', params=params) + + def start_task(self, task_definition, container_instances, cluster=None, + overrides=None): + """ + Starts a new task from the specified task definition on the + specified container instance or instances. If you want to use + the default Amazon ECS scheduler to place your task, use + `RunTask` instead. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that you want to start your task on. If you do not + specify a cluster, the default cluster is assumed.. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to start. + + :type overrides: dict + :param overrides: + + :type container_instances: list + :param container_instances: The container instance UUIDs or full Amazon + Resource Name (ARN) entries for the container instances on which + you would like to place your task. + + """ + params = {'taskDefinition': task_definition, } + self.build_list_params(params, + container_instances, + 'containerInstances.member') + if cluster is not None: + params['cluster'] = cluster + if overrides is not None: + params['overrides'] = overrides + return self._make_request( + action='StartTask', + verb='POST', + path='/', params=params) + + def stop_task(self, task, cluster=None): + """ + Stops a running task. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task you want to stop. If you do not + specify a cluster, the default cluster is assumed.. + + :type task: string + :param task: The task UUIDs or full Amazon Resource Name (ARN) entry of + the task you would like to stop. 
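A hedged sketch of the scheduler round trip: run a previously registered definition on the default cluster, then stop the resulting task (the ``sleep360:1`` definition and the response keys are assumptions)::

    launched = conn.run_task('sleep360:1', count=1)
    task_arn = launched['RunTaskResponse']['RunTaskResult'][
        'tasks'][0]['taskArn']
    conn.stop_task(task_arn)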
+ + """ + params = {'task': task, } + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='StopTask', + verb='POST', + path='/', params=params) + + def submit_container_state_change(self, cluster=None, task=None, + container_name=None, status=None, + exit_code=None, reason=None, + network_bindings=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a container changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task that hosts the container. + + :type container_name: string + :param container_name: The name of the container. + + :type status: string + :param status: The status of the state change request. + + :type exit_code: integer + :param exit_code: The exit code returned for the state change request. + + :type reason: string + :param reason: The reason for the state change request. + + :type network_bindings: list + :param network_bindings: The network bindings of the container. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if container_name is not None: + params['containerName'] = container_name + if status is not None: + params['status'] = status + if exit_code is not None: + params['exitCode'] = exit_code + if reason is not None: + params['reason'] = reason + if network_bindings is not None: + self.build_complex_list_params( + params, network_bindings, + 'networkBindings.member', + ('bindIP', 'containerPort', 'hostPort')) + return self._make_request( + action='SubmitContainerStateChange', + verb='POST', + path='/', params=params) + + def submit_task_state_change(self, cluster=None, task=None, status=None, + reason=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a task changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task in the state change request. + + :type status: string + :param status: The status of the state change request. + + :type reason: string + :param reason: The reason for the state change request. 
+ + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if status is not None: + params['status'] = status + if reason is not None: + params['reason'] = reason + return self._make_request( + action='SubmitTaskStateChange', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff -Nru python-boto-2.34.0/boto/emr/emrobject.py python-boto-2.38.0/boto/emr/emrobject.py --- python-boto-2.34.0/boto/emr/emrobject.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/emr/emrobject.py 2015-04-09 18:57:51.000000000 +0000 @@ -64,6 +64,10 @@ pass +class SupportedProduct(Arg): + pass + + class JobFlowStepList(EmrObject): def __ini__(self, connection=None): self.connection = connection @@ -190,6 +194,9 @@ elif name == 'BootstrapActions': self.bootstrapactions = ResultSet([('member', BootstrapAction)]) return self.bootstrapactions + elif name == 'SupportedProducts': + self.supported_products = ResultSet([('member', SupportedProduct)]) + return self.supported_products else: return None @@ -201,6 +208,11 @@ 'EndDateTime' ]) +class ClusterStateChangeReason(EmrObject): + Fields = set([ + 'Code', + 'Message' + ]) class ClusterStatus(EmrObject): Fields = set([ @@ -217,6 +229,9 @@ if name == 'Timeline': self.timeline = ClusterTimeline() return self.timeline + elif name == 'StateChangeReason': + self.statechangereason = ClusterStateChangeReason() + return self.statechangereason else: return None @@ -248,7 +263,10 @@ 'RunningAmiVersion', 'AutoTerminate', 'TerminationProtected', - 'VisibleToAllUsers' + 'VisibleToAllUsers', + 'MasterPublicDnsName', + 'NormalizedInstanceHours', + 'ServiceRole' ]) def __init__(self, connection=None): @@ -275,12 +293,24 @@ return None -class ClusterSummary(Cluster): +class ClusterSummary(EmrObject): Fields = set([ 'Id', - 'Name' + 'Name', + 'NormalizedInstanceHours' ]) + def __init__(self, connection): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + class ClusterSummaryList(EmrObject): Fields = set([ diff -Nru python-boto-2.34.0/boto/endpoints.json python-boto-2.38.0/boto/endpoints.json --- python-boto-2.34.0/boto/endpoints.json 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/endpoints.json 2015-04-09 18:57:51.000000000 +0000 @@ -12,6 +12,11 @@ "us-west-2": "autoscaling.us-west-2.amazonaws.com", "eu-central-1": "autoscaling.eu-central-1.amazonaws.com" }, + "awslambda": { + "us-east-1": "lambda.us-east-1.amazonaws.com", + "us-west-2": "lambda.us-west-2.amazonaws.com", + "eu-west-1": "lambda.eu-west-1.amazonaws.com" + }, "cloudformation": { "ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com", "ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com", @@ -36,6 +41,13 @@ "us-west-2": "cloudfront.amazonaws.com", "eu-central-1": "cloudfront.amazonaws.com" }, + "cloudhsm": { + "us-east-1": 
"cloudhsm.us-east-1.amazonaws.com", + "us-west-2": "cloudhsm.us-west-2.amazonaws.com", + "eu-west-1": "cloudhsm.eu-west-1.amazonaws.com", + "eu-central-1": "cloudhsm.eu-central-1.amazonaws.com", + "ap-southeast-2": "cloudhsm.ap-southeast-2.amazonaws.com" + }, "cloudsearch": { "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", "ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com", @@ -47,6 +59,17 @@ "us-west-2": "cloudsearch.us-west-2.amazonaws.com", "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com" }, + "cloudsearchdomain": { + "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com", + "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com", + "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com", + "us-east-1": "cloudsearch.us-east-1.amazonaws.com", + "us-west-1": "cloudsearch.us-west-1.amazonaws.com", + "us-west-2": "cloudsearch.us-west-2.amazonaws.com", + "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com" + }, "cloudtrail": { "ap-northeast-1": "cloudtrail.ap-northeast-1.amazonaws.com", "ap-southeast-1": "cloudtrail.ap-southeast-1.amazonaws.com", @@ -71,19 +94,28 @@ "us-west-2": "monitoring.us-west-2.amazonaws.com", "eu-central-1": "monitoring.eu-central-1.amazonaws.com" }, + "codedeploy": { + "us-east-1": "codedeploy.us-east-1.amazonaws.com", + "us-west-2": "codedeploy.us-west-2.amazonaws.com" + }, "cognito-identity": { "us-east-1": "cognito-identity.us-east-1.amazonaws.com" }, "cognito-sync": { "us-east-1": "cognito-sync.us-east-1.amazonaws.com" }, + "configservice": { + "us-east-1": "config.us-east-1.amazonaws.com", + "us-west-2": "config.us-west-2.amazonaws.com", + "eu-west-1": "config.eu-west-1.amazonaws.com", + "ap-southeast-2": "config.ap-southeast-2.amazonaws.com" + }, "datapipeline": { "us-east-1": "datapipeline.us-east-1.amazonaws.com", "us-west-2": "datapipeline.us-west-2.amazonaws.com", "eu-west-1": "datapipeline.eu-west-1.amazonaws.com", "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com", - "eu-central-1": "datapipeline.eu-central-1.amazonaws.com" + "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com" }, "directconnect": { "ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com", @@ -122,6 +154,9 @@ "us-west-2": "ec2.us-west-2.amazonaws.com", "eu-central-1": "ec2.eu-central-1.amazonaws.com" }, + "ec2containerservice": { + "us-east-1": "ecs.us-east-1.amazonaws.com" + }, "elasticache": { "ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com", "ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com", @@ -169,7 +204,7 @@ "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com", "us-west-1": "us-west-1.elasticmapreduce.amazonaws.com", "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com", - "eu-central-1": "eu-central-1.elasticmapreduce.amazonaws.com" + "eu-central-1": "elasticmapreduce.eu-central-1.amazonaws.com" }, "elastictranscoder": { "ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com", @@ -188,13 +223,15 @@ "us-east-1": "glacier.us-east-1.amazonaws.com", "us-west-1": "glacier.us-west-1.amazonaws.com", "us-west-2": "glacier.us-west-2.amazonaws.com", - "eu-central-1": "glacier.eu-central-1.amazonaws.com" + "eu-central-1": "glacier.eu-central-1.amazonaws.com", + "us-gov-west-1": "glacier.us-gov-west-1.amazonaws.com" }, "iam": { "ap-northeast-1": "iam.amazonaws.com", "ap-southeast-1": 
"iam.amazonaws.com", "ap-southeast-2": "iam.amazonaws.com", "cn-north-1": "iam.cn-north-1.amazonaws.com.cn", + "eu-central-1": "iam.amazonaws.com", "eu-west-1": "iam.amazonaws.com", "sa-east-1": "iam.amazonaws.com", "us-east-1": "iam.amazonaws.com", @@ -221,6 +258,17 @@ "ap-northeast-1": "kinesis.ap-northeast-1.amazonaws.com", "eu-central-1": "kinesis.eu-central-1.amazonaws.com" }, + "kms": { + "us-east-1": "kms.us-east-1.amazonaws.com", + "us-west-1": "kms.us-west-1.amazonaws.com", + "us-west-2": "kms.us-west-2.amazonaws.com", + "eu-west-1": "kms.eu-west-1.amazonaws.com", + "eu-central-1": "kms.eu-central-1.amazonaws.com", + "ap-southeast-2": "kms.ap-southeast-2.amazonaws.com", + "ap-southeast-1": "kms.ap-southeast-1.amazonaws.com", + "ap-northeast-1": "kms.ap-northeast-1.amazonaws.com", + "sa-east-1": "kms.sa-east-1.amazonaws.com" + }, "logs": { "us-east-1": "logs.us-east-1.amazonaws.com", "us-west-2": "logs.us-west-2.amazonaws.com", @@ -231,6 +279,10 @@ "us-east-1": "opsworks.us-east-1.amazonaws.com", "eu-central-1": "opsworks.eu-central-1.amazonaws.com" }, + "machinelearning": { + "us-east-1": "machinelearning.us-east-1.amazonaws.com", + "us-west-2": "machinelearning.us-west-2.amazonaws.com" + }, "rds": { "ap-northeast-1": "rds.ap-northeast-1.amazonaws.com", "ap-southeast-1": "rds.ap-southeast-1.amazonaws.com", @@ -257,6 +309,7 @@ "ap-northeast-1": "route53.amazonaws.com", "ap-southeast-1": "route53.amazonaws.com", "ap-southeast-2": "route53.amazonaws.com", + "eu-central-1": "route53.amazonaws.com", "eu-west-1": "route53.amazonaws.com", "sa-east-1": "route53.amazonaws.com", "us-east-1": "route53.amazonaws.com", @@ -313,7 +366,7 @@ "ap-northeast-1": "ap-northeast-1.queue.amazonaws.com", "ap-southeast-1": "ap-southeast-1.queue.amazonaws.com", "ap-southeast-2": "ap-southeast-2.queue.amazonaws.com", - "cn-north-1": "sqs.cn-north-1.amazonaws.com.cn", + "cn-north-1": "cn-north-1.queue.amazonaws.com.cn", "eu-west-1": "eu-west-1.queue.amazonaws.com", "sa-east-1": "sa-east-1.queue.amazonaws.com", "us-east-1": "queue.amazonaws.com", @@ -343,7 +396,8 @@ "us-east-1": "sts.amazonaws.com", "us-gov-west-1": "sts.us-gov-west-1.amazonaws.com", "us-west-1": "sts.amazonaws.com", - "us-west-2": "sts.amazonaws.com" + "us-west-2": "sts.amazonaws.com", + "eu-central-1": "sts.amazonaws.com" }, "support": { "us-east-1": "support.us-east-1.amazonaws.com", diff -Nru python-boto-2.34.0/boto/glacier/response.py python-boto-2.38.0/boto/glacier/response.py --- python-boto-2.34.0/boto/glacier/response.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/glacier/response.py 2015-04-09 18:57:51.000000000 +0000 @@ -36,9 +36,10 @@ if response_headers: for header_name, item_name in response_headers: self[item_name] = http_response.getheader(header_name) - if http_response.getheader('Content-Type') == 'application/json': - body = json.loads(http_response.read().decode('utf-8')) - self.update(body) + if http_response.status != 204: + if http_response.getheader('Content-Type') == 'application/json': + body = json.loads(http_response.read().decode('utf-8')) + self.update(body) size = http_response.getheader('Content-Length', None) if size is not None: self.size = size diff -Nru python-boto-2.34.0/boto/iam/connection.py python-boto-2.38.0/boto/iam/connection.py --- python-boto-2.34.0/boto/iam/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/iam/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -1544,3 +1544,99 @@ 'VirtualMFADeviceName': device_name } return 
self.get_response('CreateVirtualMFADevice', params) + + # + # IAM password policy + # + + def get_account_password_policy(self): + """ + Returns the password policy for the AWS account. + """ + params = {} + return self.get_response('GetAccountPasswordPolicy', params) + + def delete_account_password_policy(self): + """ + Delete the password policy currently set for the AWS account. + """ + params = {} + return self.get_response('DeleteAccountPasswordPolicy', params) + + def update_account_password_policy(self, allow_users_to_change_password=None, + hard_expiry=None, max_password_age=None, + minimum_password_length=None, + password_reuse_prevention=None, + require_lowercase_characters=None, + require_numbers=None, require_symbols=None, + require_uppercase_characters=None): + """ + Update the password policy for the AWS account. + + Notes: unset parameters will be reset to Amazon default settings! + Most of the password policy settings are enforced the next time your users + change their passwords. When you set minimum length and character type + requirements, they are enforced the next time your users change their + passwords - users are not forced to change their existing passwords, even + if the pre-existing passwords do not adhere to the updated password + policy. When you set a password expiration period, the expiration period + is enforced immediately. + + :type allow_users_to_change_password: bool + :param allow_users_to_change_password: Allows all IAM users in your account + to use the AWS Management Console to change their own passwords. + + :type hard_expiry: bool + :param hard_expiry: Prevents IAM users from setting a new password after + their password has expired. + + :type max_password_age: int + :param max_password_age: The number of days that an IAM user password is valid. + + :type minimum_password_length: int + :param minimum_password_length: The minimum number of characters allowed in + an IAM user password. + + :type password_reuse_prevention: int + :param password_reuse_prevention: Specifies the number of previous passwords + that IAM users are prevented from reusing. + + :type require_lowercase_characters: bool + :param require_lowercase_characters: Specifies whether IAM user passwords + must contain at least one lowercase character from the ISO basic Latin + alphabet (``a`` to ``z``). + + :type require_numbers: bool + :param require_numbers: Specifies whether IAM user passwords must contain at + least one numeric character (``0`` to ``9``). + + :type require_symbols: bool + :param require_symbols: Specifies whether IAM user passwords must contain at + least one of the following non-alphanumeric characters: + ``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '`` + + :type require_uppercase_characters: bool + :param require_uppercase_characters: Specifies whether IAM user passwords + must contain at least one uppercase character from the ISO basic Latin + alphabet (``A`` to ``Z``). 
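Together these three methods give a read/update/delete cycle for the account password policy. A short usage sketch, assuming credentials come from the usual boto configuration; because unset parameters revert to the AWS defaults, an update should pass every setting it wants to keep:

    import boto

    iam = boto.connect_iam()

    # Inspect the current policy.
    policy = iam.get_account_password_policy()

    # Unset parameters fall back to the AWS defaults, so pass every
    # setting you want to preserve, not only the ones being changed.
    iam.update_account_password_policy(
        allow_users_to_change_password=True,
        minimum_password_length=12,
        require_numbers=True,
        require_symbols=True)

    # Remove the custom policy entirely.
    iam.delete_account_password_policy()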
+ """ + params = {} + if allow_users_to_change_password is not None and type(allow_users_to_change_password) is bool: + params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower() + if hard_expiry is not None and type(allow_users_to_change_password) is bool: + params['HardExpiry'] = str(hard_expiry).lower() + if max_password_age is not None: + params['MaxPasswordAge'] = max_password_age + if minimum_password_length is not None: + params['MinimumPasswordLength'] = minimum_password_length + if password_reuse_prevention is not None: + params['PasswordReusePrevention'] = password_reuse_prevention + if require_lowercase_characters is not None and type(allow_users_to_change_password) is bool: + params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower() + if require_numbers is not None and type(allow_users_to_change_password) is bool: + params['RequireNumbers'] = str(require_numbers).lower() + if require_symbols is not None and type(allow_users_to_change_password) is bool: + params['RequireSymbols'] = str(require_symbols).lower() + if require_uppercase_characters is not None and type(allow_users_to_change_password) is bool: + params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower() + return self.get_response('UpdateAccountPasswordPolicy', params) diff -Nru python-boto-2.34.0/boto/__init__.py python-boto-2.38.0/boto/__init__.py --- python-boto-2.34.0/boto/__init__.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -38,7 +38,7 @@ from boto.compat import urlparse from boto.exception import InvalidUriError -__version__ = '2.34.0' +__version__ = '2.38.0' Version = __version__ # for backware compatibility # http://bugs.python.org/issue7980 @@ -664,6 +664,7 @@ def connect_cloudsearch2(aws_access_key_id=None, aws_secret_access_key=None, + sign_request=False, **kwargs): """ :type aws_access_key_id: string @@ -672,14 +673,37 @@ :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key + :type sign_request: bool + :param sign_request: whether or not to sign search and + upload requests + :rtype: :class:`boto.cloudsearch2.layer2.Layer2` :return: A connection to Amazon's CloudSearch2 service """ from boto.cloudsearch2.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, + sign_request=sign_request, **kwargs) +def connect_cloudsearchdomain(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection` + :return: A connection to Amazon's CloudSearch Domain service + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return CloudSearchDomainConnection(aws_access_key_id, + aws_secret_access_key, **kwargs) + + def connect_beanstalk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): @@ -925,6 +949,153 @@ aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs + ) + + +def connect_kms(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Key Management Service + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + 
:rtype: :class:`boto.kms.layer1.KMSConnection` + :return: A connection to the AWS Key Management Service + """ + from boto.kms.layer1 import KMSConnection + return KMSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_awslambda(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Lambda + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection` + :return: A connection to the AWS Lambda service + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return AWSLambdaConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_codedeploy(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CodeDeploy + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.codedeploy.layer1.CodeDeployConnection` + :return: A connection to the AWS CodeDeploy service + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return CodeDeployConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_configservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Config + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.configservice.layer1.ConfigServiceConnection` + :return: A connection to the AWS Config service + """ + from boto.configservice.layer1 import ConfigServiceConnection + return ConfigServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_cloudhsm(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudHSM + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudhsm.layer1.CloudHSMConnection` + :return: A connection to the AWS CloudHSM service + """ + from boto.cloudhsm.layer1 import CloudHSMConnection + return CloudHSMConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_ec2containerservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to Amazon EC2 Container Service + :rtype: :class:`boto.ec2containerservice.layer1.EC2ContainerServiceConnection` + :return: A connection to the Amazon EC2 Container Service + """ + from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection + return EC2ContainerServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_machinelearning(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to Amazon Machine Learning service + :rtype: :class:`boto.machinelearning.layer1.MachineLearningConnection` + :return: A connection to the Amazon Machine 
Learning service + """ + from boto.machinelearning.layer1 import MachineLearningConnection + return MachineLearningConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs ) diff -Nru python-boto-2.34.0/boto/kinesis/layer1.py python-boto-2.38.0/boto/kinesis/layer1.py --- python-boto-2.34.0/boto/kinesis/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/kinesis/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -28,6 +28,7 @@ from boto.exception import JSONResponseError from boto.kinesis import exceptions from boto.compat import json +from boto.compat import six class KinesisConnection(AWSQueryConnection): @@ -67,23 +68,43 @@ def _required_auth_capability(self): return ['hmac-v4'] + def add_tags_to_stream(self, stream_name, tags): + """ + Adds or updates tags for the specified Amazon Kinesis stream. + Each stream can have up to 10 tags. + + If tags have already been assigned to the stream, + `AddTagsToStream` overwrites any existing tags that correspond + to the specified tag keys. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type tags: map + :param tags: The set of key-value pairs to use to create the tags. + + """ + params = {'StreamName': stream_name, 'Tags': tags, } + return self.make_request(action='AddTagsToStream', + body=json.dumps(params)) + def create_stream(self, stream_name, shard_count): """ - This operation adds a new Amazon Kinesis stream to your AWS - account. A stream captures and transports data records that - are continuously emitted from different data sources or - producers . Scale-out within an Amazon Kinesis stream is - explicitly supported by means of shards, which are uniquely - identified groups of data records in an Amazon Kinesis stream. + Creates an Amazon Kinesis stream. A stream captures and + transports data records that are continuously emitted from + different data sources or producers. Scale-out within an + Amazon Kinesis stream is explicitly supported by means of + shards, which are uniquely identified groups of data records + in an Amazon Kinesis stream. You specify and control the number of shards that a stream is - composed of. Each shard can support up to 5 read transactions - per second up to a maximum total of 2 MB of data read per - second. Each shard can support up to 1000 write transactions - per second up to a maximum total of 1 MB data written per - second. You can add shards to a stream if the amount of data - input increases and you can remove shards if the amount of - data input decreases. + composed of. Each open shard can support up to 5 read + transactions per second, up to a maximum total of 2 MB of data + read per second. Each shard can support up to 1000 records + written per second, up to a maximum total of 1 MB data written + per second. You can add shards to a stream if the amount of + data input increases and you can remove shards if the amount + of data input decreases. The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by @@ -93,27 +114,26 @@ `CreateStream` is an asynchronous operation. 
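Because the CREATING-to-ACTIVE transition described here happens asynchronously, callers typically poll `describe_stream` until the stream is usable. A rough sketch; the stream name and poll interval are arbitrary:

    import time
    import boto

    kinesis = boto.connect_kinesis()
    kinesis.create_stream('clickstream', shard_count=2)

    # CreateStream returns immediately; wait for CREATING -> ACTIVE.
    while True:
        description = kinesis.describe_stream('clickstream')
        if description['StreamDescription']['StreamStatus'] == 'ACTIVE':
            break
        time.sleep(5)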
Upon receiving a `CreateStream` request, Amazon Kinesis immediately returns and - sets the stream status to CREATING. After the stream is - created, Amazon Kinesis sets the stream status to ACTIVE. You - should perform read and write operations only on an ACTIVE - stream. + sets the stream status to `CREATING`. After the stream is + created, Amazon Kinesis sets the stream status to `ACTIVE`. + You should perform read and write operations only on an + `ACTIVE` stream. You receive a `LimitExceededException` when making a `CreateStream` request if you try to do one of the following: - + Have more than five streams in the CREATING state at any + + Have more than five streams in the `CREATING` state at any point in time. + Create more shards than are authorized for your account. - **Note:** The default limit for an AWS account is two shards - per stream. If you need to create a stream with more than two - shards, contact AWS Support to increase the limit on your - account. + The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, + `contact AWS Support`_ to increase the limit on your account. - You can use the `DescribeStream` operation to check the stream - status, which is returned in `StreamStatus`. + You can use `DescribeStream` to check the stream status, which + is returned in `StreamStatus`. `CreateStream` has a limit of 5 transactions per second per account. @@ -130,9 +150,9 @@ :param shard_count: The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput. - **Note:** The default limit for an AWS account is two shards per - stream. If you need to create a stream with more than two shards, - contact AWS Support to increase the limit on your account. + **Note:** The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, `contact + AWS Support`_ to increase the limit on your account. """ params = { @@ -144,23 +164,23 @@ def delete_stream(self, stream_name): """ - This operation deletes a stream and all of its shards and - data. You must shut down any applications that are operating - on the stream before you delete the stream. If an application - attempts to operate on a deleted stream, it will receive the - exception `ResourceNotFoundException`. - - If the stream is in the ACTIVE state, you can delete it. After - a `DeleteStream` request, the specified stream is in the - DELETING state until Amazon Kinesis completes the deletion. + Deletes a stream and all its shards and data. You must shut + down any applications that are operating on the stream before + you delete the stream. If an application attempts to operate + on a deleted stream, it will receive the exception + `ResourceNotFoundException`. + + If the stream is in the `ACTIVE` state, you can delete it. + After a `DeleteStream` request, the specified stream is in the + `DELETING` state until Amazon Kinesis completes the deletion. **Note:** Amazon Kinesis might continue to accept data read - and write operations, such as PutRecord and GetRecords, on a - stream in the DELETING state until the stream deletion is - complete. + and write operations, such as PutRecord, PutRecords, and + GetRecords, on a stream in the `DELETING` state until the + stream deletion is complete. When you delete a stream, any shards in that stream are also - deleted. + deleted, and any tags are dissociated from the stream. 
You can use the DescribeStream operation to check the state of the stream, which is returned in `StreamStatus`. @@ -179,17 +199,17 @@ def describe_stream(self, stream_name, limit=None, exclusive_start_shard_id=None): """ - This operation returns the following information about the - stream: the current status of the stream, the stream Amazon - Resource Name (ARN), and an array of shard objects that - comprise the stream. For each shard object there is - information about the hash key and sequence number ranges that - the shard spans, and the IDs of any earlier shards that played - in a role in a MergeShards or SplitShard operation that - created the shard. A sequence number is the identifier - associated with every record ingested in the Amazon Kinesis - stream. The sequence number is assigned by the Amazon Kinesis - service when a record is put into the stream. + Describes the specified stream. + + The information about the stream includes its current status, + its Amazon Resource Name (ARN), and an array of shard objects. + For each shard object, there is information about the hash key + and sequence number ranges that the shard spans, and the IDs + of any earlier shards that played in a role in creating the + shard. A sequence number is the identifier associated with + every record ingested in the Amazon Kinesis stream. The + sequence number is assigned when a record is put into the + stream. You can limit the number of returned shards using the `Limit` parameter. The number of shards in a stream may be too large @@ -198,11 +218,11 @@ output. `HasMoreShards` is set to `True` when there is more data available. - If there are more shards available, you can request more - shards by using the shard ID of the last shard returned by the - `DescribeStream` request, in the `ExclusiveStartShardId` - parameter in a subsequent request to `DescribeStream`. - `DescribeStream` is a paginated operation. + `DescribeStream` is a paginated operation. If there are more + shards available, you can request them using the shard ID of + the last shard returned. Specify this ID in the + `ExclusiveStartShardId` parameter in a subsequent request to + `DescribeStream`. `DescribeStream` has a limit of 10 transactions per second per account. @@ -215,7 +235,7 @@ :type exclusive_start_shard_id: string :param exclusive_start_shard_id: The shard ID of the shard to start - with for the stream description. + with. """ params = {'StreamName': stream_name, } @@ -228,52 +248,72 @@ def get_records(self, shard_iterator, limit=None, b64_decode=True): """ - This operation returns one or more data records from a shard. - A `GetRecords` operation request can retrieve up to 10 MB of - data. - - You specify a shard iterator for the shard that you want to - read data from in the `ShardIterator` parameter. The shard - iterator specifies the position in the shard from which you - want to start reading data records sequentially. A shard - iterator specifies this position using the sequence number of - a data record in the shard. For more information about the - shard iterator, see GetShardIterator. - - `GetRecords` may return a partial result if the response size - limit is exceeded. You will get an error, but not a partial - result if the shard's provisioned throughput is exceeded, the - shard iterator has expired, or an internal processing failure - has occurred. Clients can request a smaller amount of data by - specifying a maximum number of returned records using the - `Limit` parameter. 
The `Limit` parameter can be set to an - integer value of up to 10,000. If you set the value to an - integer greater than 10,000, you will receive - `InvalidArgumentException`. - - A new shard iterator is returned by every `GetRecords` request - in `NextShardIterator`, which you use in the `ShardIterator` - parameter of the next `GetRecords` request. When you - repeatedly read from an Amazon Kinesis stream use a - GetShardIterator request to get the first shard iterator to - use in your first `GetRecords` request and then use the shard - iterator returned in `NextShardIterator` for subsequent reads. + Gets data records from a shard. - `GetRecords` can return `null` for the `NextShardIterator` to - reflect that the shard has been closed and that the requested - shard iterator would never have returned more data. - - If no items can be processed because of insufficient - provisioned throughput on the shard involved in the request, - `GetRecords` throws `ProvisionedThroughputExceededException`. + Specify a shard iterator using the `ShardIterator` parameter. + The shard iterator specifies the position in the shard from + which you want to start reading data records sequentially. If + there are no records available in the portion of the shard + that the iterator points to, `GetRecords` returns an empty + list. Note that it might take multiple calls to get to a + portion of the shard that contains records. + + You can scale by provisioning multiple shards. Your + application should have one thread per shard, each reading + continuously from its stream. To read from a stream + continually, call `GetRecords` in a loop. Use GetShardIterator + to get the shard iterator to specify in the first `GetRecords` + call. `GetRecords` returns a new shard iterator in + `NextShardIterator`. Specify the shard iterator returned in + `NextShardIterator` in subsequent calls to `GetRecords`. Note + that if the shard has been closed, the shard iterator can't + return more data and `GetRecords` returns `null` in + `NextShardIterator`. You can terminate the loop when the shard + is closed, or when the shard iterator reaches the record with + the sequence number or other attribute that marks it as the + last record to process. + + Each data record can be up to 50 KB in size, and each shard + can read up to 2 MB per second. You can ensure that your calls + don't exceed the maximum supported size or throughput by using + the `Limit` parameter to specify the maximum number of records + that `GetRecords` can return. Consider your average record + size when determining this limit. For example, if your average + record size is 40 KB, you can limit the data returned to about + 1 MB per call by specifying 25 as the limit. + + The size of the data returned by `GetRecords` will vary + depending on the utilization of the shard. The maximum size of + data that `GetRecords` can return is 10 MB. If a call returns + 10 MB of data, subsequent calls made within the next 5 seconds + throw `ProvisionedThroughputExceededException`. If there is + insufficient provisioned throughput on the shard, subsequent + calls made within the next 1 second throw + `ProvisionedThroughputExceededException`. Note that + `GetRecords` won't return any data when it throws an + exception. For this reason, we recommend that you wait one + second between calls to `GetRecords`; however, it's possible + that the application will get exceptions for longer than 1 + second. 
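The `NextShardIterator` hand-off and the one-second pacing recommended here combine into a simple read loop. A sketch, assuming the stream and shard ID are already known (both hypothetical):

    import time
    import boto

    kinesis = boto.connect_kinesis()

    iterator = kinesis.get_shard_iterator(
        'clickstream', 'shardId-000000000000',
        'TRIM_HORIZON')['ShardIterator']

    # NextShardIterator is None once the shard is closed and drained.
    while iterator is not None:
        result = kinesis.get_records(iterator, limit=25)
        for record in result['Records']:
            print(record['Data'])  # Base64-decoded by default
        iterator = result['NextShardIterator']
        time.sleep(1)  # pacing recommended above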
+ + To detect whether the application is falling behind in + processing, add a timestamp to your records and note how long + it takes to process them. You can also monitor how much data + is in a stream using the CloudWatch metrics for write + operations ( `PutRecord` and `PutRecords`). For more + information, see `Monitoring Amazon Kinesis with Amazon + CloudWatch`_ in the Amazon Kinesis Developer Guide . :type shard_iterator: string :param shard_iterator: The position in the shard from which you want to - start sequentially reading data records. + start sequentially reading data records. A shard iterator specifies + this position using the sequence number of a data record in the + shard. :type limit: integer - :param limit: The maximum number of records to return, which can be set - to a value of up to 10,000. + :param limit: The maximum number of records to return. Specify a value + of up to 10,000. If you specify a value that is greater than + 10,000, `GetRecords` throws `InvalidArgumentException`. :type b64_decode: boolean :param b64_decode: Decode the Base64-encoded ``Data`` field of records. @@ -297,32 +337,31 @@ def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number=None): """ - This operation returns a shard iterator in `ShardIterator`. - The shard iterator specifies the position in the shard from - which you want to start reading data records sequentially. A - shard iterator specifies this position using the sequence - number of a data record in a shard. A sequence number is the - identifier associated with every record ingested in the Amazon - Kinesis stream. The sequence number is assigned by the Amazon - Kinesis service when a record is put into the stream. - - You must specify the shard iterator type in the - `GetShardIterator` request. For example, you can set the - `ShardIteratorType` parameter to read exactly from the + Gets a shard iterator. A shard iterator expires five minutes + after it is returned to the requester. + + A shard iterator specifies the position in the shard from + which to start reading data records sequentially. A shard + iterator specifies this position using the sequence number of + a data record in a shard. A sequence number is the identifier + associated with every record ingested in the Amazon Kinesis + stream. The sequence number is assigned when a record is put + into the stream. + + You must specify the shard iterator type. For example, you can + set the `ShardIteratorType` parameter to read exactly from the position denoted by a specific sequence number by using the - AT_SEQUENCE_NUMBER shard iterator type, or right after the - sequence number by using the AFTER_SEQUENCE_NUMBER shard + `AT_SEQUENCE_NUMBER` shard iterator type, or right after the + sequence number by using the `AFTER_SEQUENCE_NUMBER` shard iterator type, using sequence numbers returned by earlier - PutRecord, GetRecords or DescribeStream requests. You can - specify the shard iterator type TRIM_HORIZON in the request to - cause `ShardIterator` to point to the last untrimmed record in - the shard in the system, which is the oldest data record in - the shard. Or you can point to just after the most recent - record in the shard, by using the shard iterator type LATEST, - so that you always read the most recent data in the shard. - - **Note:** Each shard iterator expires five minutes after it is - returned to the requester. + calls to PutRecord, PutRecords, GetRecords, or DescribeStream. 
+ You can specify the shard iterator type `TRIM_HORIZON` in the + request to cause `ShardIterator` to point to the last + untrimmed record in the shard in the system, which is the + oldest data record in the shard. Or you can point to just + after the most recent record in the shard, by using the shard + iterator type `LATEST`, so that you always read the most + recent data in the shard. When you repeatedly read from an Amazon Kinesis stream use a GetShardIterator request to get the first shard iterator to to @@ -333,18 +372,16 @@ `NextShardIterator`, which you use in the `ShardIterator` parameter of the next `GetRecords` request. - If a `GetShardIterator` request is made too often, you will - receive a `ProvisionedThroughputExceededException`. For more - information about throughput limits, see the `Amazon Kinesis - Developer Guide`_. - - `GetShardIterator` can return `null` for its `ShardIterator` - to indicate that the shard has been closed and that the - requested iterator will return no more data. A shard can be - closed by a SplitShard or MergeShards operation. + If a `GetShardIterator` request is made too often, you receive + a `ProvisionedThroughputExceededException`. For more + information about throughput limits, see GetRecords. + + If the shard is closed, the iterator can't return more data, + and `GetShardIterator` returns `null` for its `ShardIterator`. + A shard can be closed using SplitShard or MergeShards. `GetShardIterator` has a limit of 5 transactions per second - per account per shard. + per account per open shard. :type stream_name: string :param stream_name: The name of the stream. @@ -386,10 +423,7 @@ def list_streams(self, limit=None, exclusive_start_stream_name=None): """ - This operation returns an array of the names of all the - streams that are associated with the AWS account making the - `ListStreams` request. A given AWS account can have many - streams active at one time. + Lists your streams. The number of streams may be too large to return from a single call to `ListStreams`. You can limit the number of returned @@ -426,46 +460,74 @@ return self.make_request(action='ListStreams', body=json.dumps(params)) + def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, + limit=None): + """ + Lists the tags for the specified Amazon Kinesis stream. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type exclusive_start_tag_key: string + :param exclusive_start_tag_key: The key to use as the starting point + for the list of tags. If this parameter is set, `ListTagsForStream` + gets all tags that occur after `ExclusiveStartTagKey`. + + :type limit: integer + :param limit: The number of tags to return. If this number is less than + the total number of tags associated with the stream, `HasMoreTags` + is set to `True`. To list additional tags, set + `ExclusiveStartTagKey` to the last key in the response. + + """ + params = {'StreamName': stream_name, } + if exclusive_start_tag_key is not None: + params['ExclusiveStartTagKey'] = exclusive_start_tag_key + if limit is not None: + params['Limit'] = limit + return self.make_request(action='ListTagsForStream', + body=json.dumps(params)) + def merge_shards(self, stream_name, shard_to_merge, adjacent_shard_to_merge): """ - This operation merges two adjacent shards in a stream and - combines them into a single shard to reduce the stream's - capacity to ingest and transport data. 
Two shards are - considered adjacent if the union of the hash key ranges for - the two shards form a contiguous set with no gaps. For - example, if you have two shards, one with a hash key range of - 276...381 and the other with a hash key range of 382...454, - then you could merge these two shards into a single shard that - would have a hash key range of 276...454. After the merge, the - single child shard receives data for all hash key values - covered by the two parent shards. + Merges two adjacent shards in a stream and combines them into + a single shard to reduce the stream's capacity to ingest and + transport data. Two shards are considered adjacent if the + union of the hash key ranges for the two shards form a + contiguous set with no gaps. For example, if you have two + shards, one with a hash key range of 276...381 and the other + with a hash key range of 382...454, then you could merge these + two shards into a single shard that would have a hash key + range of 276...454. After the merge, the single child shard + receives data for all hash key values covered by the two + parent shards. `MergeShards` is called when there is a need to reduce the overall capacity of a stream because of excess capacity that - is not being used. The operation requires that you specify the - shard to be merged and the adjacent shard for a given stream. - For more information about merging shards, see the `Amazon - Kinesis Developer Guide`_. - - If the stream is in the ACTIVE state, you can call - `MergeShards`. If a stream is in CREATING or UPDATING or - DELETING states, then Amazon Kinesis returns a + is not being used. You must specify the shard to be merged and + the adjacent shard for a stream. For more information about + merging shards, see `Merge Two Shards`_ in the Amazon Kinesis + Developer Guide . + + If the stream is in the `ACTIVE` state, you can call + `MergeShards`. If a stream is in the `CREATING`, `UPDATING`, + or `DELETING` state, `MergeShards` returns a `ResourceInUseException`. If the specified stream does not - exist, Amazon Kinesis returns a `ResourceNotFoundException`. + exist, `MergeShards` returns a `ResourceNotFoundException`. - You can use the DescribeStream operation to check the state of - the stream, which is returned in `StreamStatus`. + You can use DescribeStream to check the state of the stream, + which is returned in `StreamStatus`. `MergeShards` is an asynchronous operation. Upon receiving a `MergeShards` request, Amazon Kinesis immediately returns a - response and sets the `StreamStatus` to UPDATING. After the + response and sets the `StreamStatus` to `UPDATING`. After the operation is completed, Amazon Kinesis sets the `StreamStatus` - to ACTIVE. Read and write operations continue to work while - the stream is in the UPDATING state. + to `ACTIVE`. Read and write operations continue to work while + the stream is in the `UPDATING` state. - You use the DescribeStream operation to determine the shard - IDs that are specified in the `MergeShards` request. + You use DescribeStream to determine the shard IDs that are + specified in the `MergeShards` request. 
If you try to operate on too many streams in parallel using CreateStream, DeleteStream, `MergeShards` or SplitShard, you @@ -591,66 +653,171 @@ if sequence_number_for_ordering is not None: params['SequenceNumberForOrdering'] = sequence_number_for_ordering if b64_encode: - params['Data'] = base64.b64encode( - params['Data'].encode('utf-8')).decode('utf-8') + if not isinstance(params['Data'], six.binary_type): + params['Data'] = params['Data'].encode('utf-8') + params['Data'] = base64.b64encode(params['Data']).decode('utf-8') return self.make_request(action='PutRecord', body=json.dumps(params)) + def put_records(self, records, stream_name, b64_encode=True): + """ + Puts (writes) multiple data records from a producer into an + Amazon Kinesis stream in a single call (also referred to as a + `PutRecords` request). Use this operation to send data from a + data producer into the Amazon Kinesis stream for real-time + ingestion and processing. Each shard can support up to 1000 + records written per second, up to a maximum total of 1 MB data + written per second. + + You must specify the name of the stream that captures, stores, + and transports the data; and an array of request `Records`, + with each record in the array requiring a partition key and + data blob. + + The data blob can be any type of data; for example, a segment + from a log file, geographic/location data, website clickstream + data, and so on. + + The partition key is used by Amazon Kinesis as input to a hash + function that maps the partition key and associated data to a + specific shard. An MD5 hash function is used to map partition + keys to 128-bit integer values and to map associated data + records to shards. As a result of this hashing mechanism, all + data records with the same partition key map to the same shard + within the stream. For more information, see `Partition Key`_ + in the Amazon Kinesis Developer Guide . + + Each record in the `Records` array may include an optional + parameter, `ExplicitHashKey`, which overrides the partition + key to shard mapping. This parameter allows a data producer to + determine explicitly the shard where the record is stored. For + more information, see `Adding Multiple Records with + PutRecords`_ in the Amazon Kinesis Developer Guide . + + The `PutRecords` response includes an array of response + `Records`. Each record in the response array directly + correlates with a record in the request array using natural + ordering, from the top to the bottom of the request and + response. The response `Records` array always includes the + same number of records as the request array. + + The response `Records` array includes both successfully and + unsuccessfully processed records. Amazon Kinesis attempts to + process all records in each `PutRecords` request. A single + record failure does not stop the processing of subsequent + records. + + A successfully-processed record includes `ShardId` and + `SequenceNumber` values. The `ShardId` parameter identifies + the shard in the stream where the record is stored. The + `SequenceNumber` parameter is an identifier assigned to the + put record, unique to all records in the stream. + + An unsuccessfully-processed record includes `ErrorCode` and + `ErrorMessage` values. `ErrorCode` reflects the type of error + and can be one of the following values: + `ProvisionedThroughputExceededException` or `InternalFailure`. 
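Since the response pairs each request record with a success or failure entry, producers usually retry just the failed records. A sketch of that pattern with hypothetical payloads; note that, as the implementation below shows, `put_records` Base64-encodes each `Data` field in place, so retried entries are sent with `b64_encode=False` to avoid double encoding:

    import boto

    kinesis = boto.connect_kinesis()
    batch = [{'Data': 'event-%d' % i, 'PartitionKey': 'user-%d' % i}
             for i in range(10)]

    result = kinesis.put_records(batch, 'clickstream')
    while result.get('FailedRecordCount', 0) > 0:
        # Keep only records whose response entry carries an ErrorCode.
        batch = [record for record, status
                 in zip(batch, result['Records'])
                 if 'ErrorCode' in status]
        # Data was already encoded in place by the first call.
        result = kinesis.put_records(batch, 'clickstream',
                                     b64_encode=False)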
+ `ErrorMessage` provides more detailed information about the + `ProvisionedThroughputExceededException` exception including + the account ID, stream name, and shard ID of the record that + was throttled. + + Data records are accessible for only 24 hours from the time + that they are added to an Amazon Kinesis stream. + + :type records: list + :param records: The records associated with the request. + + :type stream_name: string + :param stream_name: The stream name associated with the request. + + :type b64_encode: boolean + :param b64_encode: Whether to Base64 encode `data`. Can be set to + ``False`` if `data` is already encoded to prevent double encoding. + + """ + params = {'Records': records, 'StreamName': stream_name, } + if b64_encode: + for i in range(len(params['Records'])): + data = params['Records'][i]['Data'] + if not isinstance(data, six.binary_type): + data = data.encode('utf-8') + params['Records'][i]['Data'] = base64.b64encode( + data).decode('utf-8') + return self.make_request(action='PutRecords', + body=json.dumps(params)) + + def remove_tags_from_stream(self, stream_name, tag_keys): + """ + Deletes tags from the specified Amazon Kinesis stream. + + If you specify a tag that does not exist, it is ignored. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type tag_keys: list + :param tag_keys: A list of tag keys. Each corresponding tag is removed + from the stream. + + """ + params = {'StreamName': stream_name, 'TagKeys': tag_keys, } + return self.make_request(action='RemoveTagsFromStream', + body=json.dumps(params)) + def split_shard(self, stream_name, shard_to_split, new_starting_hash_key): """ - This operation splits a shard into two new shards in the - stream, to increase the stream's capacity to ingest and - transport data. `SplitShard` is called when there is a need to - increase the overall capacity of stream because of an expected - increase in the volume of data records being ingested. + Splits a shard into two new shards in the stream, to increase + the stream's capacity to ingest and transport data. + `SplitShard` is called when there is a need to increase the + overall capacity of stream because of an expected increase in + the volume of data records being ingested. - `SplitShard` can also be used when a given shard appears to be + You can also use `SplitShard` when a shard appears to be approaching its maximum utilization, for example, when the set of producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call - the `SplitShard` operation to increase stream capacity, so - that more Amazon Kinesis applications can simultaneously read - data from the stream for real-time processing. - - The `SplitShard` operation requires that you specify the shard - to be split and the new hash key, which is the position in the - shard where the shard gets split in two. In many cases, the - new hash key might simply be the average of the beginning and - ending hash key, but it can be any hash key value in the range - being mapped into the shard. For more information about - splitting shards, see the `Amazon Kinesis Developer Guide`_. - - You can use the DescribeStream operation to determine the - shard ID and hash key values for the `ShardToSplit` and - `NewStartingHashKey` parameters that are specified in the - `SplitShard` request. 
+ `SplitShard` to increase stream capacity, so that more Amazon + Kinesis applications can simultaneously read data from the + stream for real-time processing. + + You must specify the shard to be split and the new hash key, + which is the position in the shard where the shard gets split + in two. In many cases, the new hash key might simply be the + average of the beginning and ending hash key, but it can be + any hash key value in the range being mapped into the shard. + For more information about splitting shards, see `Split a + Shard`_ in the Amazon Kinesis Developer Guide . + + You can use DescribeStream to determine the shard ID and hash + key values for the `ShardToSplit` and `NewStartingHashKey` + parameters that are specified in the `SplitShard` request. `SplitShard` is an asynchronous operation. Upon receiving a `SplitShard` request, Amazon Kinesis immediately returns a - response and sets the stream status to UPDATING. After the + response and sets the stream status to `UPDATING`. After the operation is completed, Amazon Kinesis sets the stream status - to ACTIVE. Read and write operations continue to work while - the stream is in the UPDATING state. + to `ACTIVE`. Read and write operations continue to work while + the stream is in the `UPDATING` state. You can use `DescribeStream` to check the status of the stream, which is returned in `StreamStatus`. If the stream is - in the ACTIVE state, you can call `SplitShard`. If a stream is - in CREATING or UPDATING or DELETING states, then Amazon - Kinesis returns a `ResourceInUseException`. - - If the specified stream does not exist, Amazon Kinesis returns - a `ResourceNotFoundException`. If you try to create more - shards than are authorized for your account, you receive a - `LimitExceededException`. - - **Note:** The default limit for an AWS account is two shards - per stream. If you need to create a stream with more than two - shards, contact AWS Support to increase the limit on your - account. + in the `ACTIVE` state, you can call `SplitShard`. If a stream + is in `CREATING` or `UPDATING` or `DELETING` states, + `DescribeStream` returns a `ResourceInUseException`. + + If the specified stream does not exist, `DescribeStream` + returns a `ResourceNotFoundException`. If you try to create + more shards than are authorized for your account, you receive + a `LimitExceededException`. + + The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, + `contact AWS Support`_ to increase the limit on your account. If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you - will receive a `LimitExceededException`. + receive a `LimitExceededException`. `SplitShard` has limit of 5 transactions per second per account. diff -Nru python-boto-2.34.0/boto/kms/exceptions.py python-boto-2.38.0/boto/kms/exceptions.py --- python-boto-2.34.0/boto/kms/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/kms/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,72 @@ +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class InvalidGrantTokenException(BotoServerError): + pass + + +class DisabledException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class DependencyTimeoutException(BotoServerError): + pass + + +class InvalidMarkerException(BotoServerError): + pass + + +class AlreadyExistsException(BotoServerError): + pass + + +class InvalidCiphertextException(BotoServerError): + pass + + +class KeyUnavailableException(BotoServerError): + pass + + +class InvalidAliasNameException(BotoServerError): + pass + + +class UnsupportedOperationException(BotoServerError): + pass + + +class InvalidArnException(BotoServerError): + pass + + +class KMSInternalException(BotoServerError): + pass + + +class InvalidKeyUsageException(BotoServerError): + pass + + +class MalformedPolicyDocumentException(BotoServerError): + pass + + +class NotFoundException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/kms/__init__.py python-boto-2.38.0/boto/kms/__init__.py --- python-boto-2.34.0/boto/kms/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/kms/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Key Management Service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.kms.layer1 import KMSConnection + return get_regions('kms', connection_cls=KMSConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/kms/layer1.py python-boto-2.38.0/boto/kms/layer1.py --- python-boto-2.34.0/boto/kms/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/kms/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,821 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.kms import exceptions +from boto.compat import six +import base64 + + +class KMSConnection(AWSQueryConnection): + """ + AWS Key Management Service + AWS Key Management Service (KMS) is an encryption and key + management web service. This guide describes the KMS actions that + you can call programmatically. For general information about KMS, + see (need an address here). For the KMS developer guide, see (need + address here). + + AWS provides SDKs that consist of libraries and sample code for + various programming languages and platforms (Java, Ruby, .Net, + iOS, Android, etc.). The SDKs provide a convenient way to create + programmatic access to KMS and AWS. For example, the SDKs take + care of tasks such as signing requests (see below), managing + errors, and retrying requests automatically. For more information + about the AWS SDKs, including how to download and install them, + see `Tools for Amazon Web Services`_. + + We recommend that you use the AWS SDKs to make programmatic API + calls to KMS. However, you can also use the KMS Query API to make + direct calls to the KMS web service. + + **Signing Requests** + + Requests must be signed by using an access key ID and a secret + access key. We strongly recommend that you do not use your AWS + account access key ID and secret key for everyday work with KMS. + Instead, use the access key ID and secret access key for an IAM + user, or you can use the AWS Security Token Service to generate + temporary security credentials that you can use to sign requests. + + All KMS operations require `Signature Version 4`_. + + **Recording API Requests** + + KMS supports AWS CloudTrail, a service that records AWS API calls + and related events for your AWS account and delivers them to an + Amazon S3 bucket that you specify. By using the information + collected by CloudTrail, you can determine what requests were made + to KMS, who made the request, when it was made, and so on. To + learn more about CloudTrail, including how to turn it on and find + your log files, see the `AWS CloudTrail User Guide`_ + + **Additional Resources** + + For more information about credentials and request signing, see + the following: + + + + `AWS Security Credentials`_. 
This topic provides general + information about the types of credentials used for accessing AWS. + + `AWS Security Token Service`_. This guide describes how to + create and use temporary security credentials. + + `Signing AWS API Requests`_. This set of topics walks you + through the process of signing a request using an access key ID + and a secret access key. + """ + APIVersion = "2014-11-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "kms.us-east-1.amazonaws.com" + ServiceName = "KMS" + TargetPrefix = "TrentService" + ResponseError = JSONResponseError + + _faults = { + "InvalidGrantTokenException": exceptions.InvalidGrantTokenException, + "DisabledException": exceptions.DisabledException, + "LimitExceededException": exceptions.LimitExceededException, + "DependencyTimeoutException": exceptions.DependencyTimeoutException, + "InvalidMarkerException": exceptions.InvalidMarkerException, + "AlreadyExistsException": exceptions.AlreadyExistsException, + "InvalidCiphertextException": exceptions.InvalidCiphertextException, + "KeyUnavailableException": exceptions.KeyUnavailableException, + "InvalidAliasNameException": exceptions.InvalidAliasNameException, + "UnsupportedOperationException": exceptions.UnsupportedOperationException, + "InvalidArnException": exceptions.InvalidArnException, + "KMSInternalException": exceptions.KMSInternalException, + "InvalidKeyUsageException": exceptions.InvalidKeyUsageException, + "MalformedPolicyDocumentException": exceptions.MalformedPolicyDocumentException, + "NotFoundException": exceptions.NotFoundException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(KMSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_alias(self, alias_name, target_key_id): + """ + Creates a display name for a customer master key. An alias can + be used to identify a key and should be unique. The console + enforces a one-to-one mapping between the alias and a key. An + alias name can contain only alphanumeric characters, forward + slashes (/), underscores (_), and dashes (-). An alias must + start with the word "alias" followed by a forward slash + (alias/). An alias that begins with "aws" after the forward + slash (alias/aws...) is reserved by Amazon Web Services (AWS). + + :type alias_name: string + :param alias_name: String that contains the display name. Aliases that + begin with AWS are reserved. + + :type target_key_id: string + :param target_key_id: An identifier of the key for which you are + creating the alias. This value cannot be another alias. + + """ + params = { + 'AliasName': alias_name, + 'TargetKeyId': target_key_id, + } + return self.make_request(action='CreateAlias', + body=json.dumps(params)) + + def create_grant(self, key_id, grantee_principal, + retiring_principal=None, operations=None, + constraints=None, grant_tokens=None): + """ + Adds a grant to a key to specify who can access the key and + under what conditions. Grants are alternate permission + mechanisms to key policies. If absent, access to the key is + evaluated based on IAM policies attached to the user. By + default, grants do not expire. Grants can be listed, retired, + or revoked as indicated by the following APIs. Typically, when + you are finished using a grant, you retire it. 
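# A minimal usage sketch for the create_alias call defined above,
# assuming a 'us-east-1' endpoint; the key id and alias name are
# hypothetical placeholders, and credentials come from the usual boto
# config or environment variables.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
# Alias names must start with "alias/"; "alias/aws..." is reserved.
kms.create_alias(alias_name='alias/example-app',
                 target_key_id='01234567-89ab-cdef-0123-456789abcdef')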
When you want + to end a grant immediately, revoke it. For more information + about grants, see `Grants`_. + + #. ListGrants + #. RetireGrant + #. RevokeGrant + + :type key_id: string + :param key_id: A unique key identifier for a customer master key. This + value can be a globally unique identifier, an ARN, or an alias. + + :type grantee_principal: string + :param grantee_principal: Principal given permission by the grant to + use the key identified by the `keyId` parameter. + + :type retiring_principal: string + :param retiring_principal: Principal given permission to retire the + grant. For more information, see RetireGrant. + + :type operations: list + :param operations: List of operations permitted by the grant. This can + be any combination of one or more of the following values: + + #. Decrypt + #. Encrypt + #. GenerateDataKey + #. GenerateDataKeyWithoutPlaintext + #. ReEncryptFrom + #. ReEncryptTo + #. CreateGrant + + :type constraints: dict + :param constraints: Specifies the conditions under which the actions + specified by the `Operations` parameter are allowed. + + :type grant_tokens: list + :param grant_tokens: List of grant tokens. + + """ + params = { + 'KeyId': key_id, + 'GranteePrincipal': grantee_principal, + } + if retiring_principal is not None: + params['RetiringPrincipal'] = retiring_principal + if operations is not None: + params['Operations'] = operations + if constraints is not None: + params['Constraints'] = constraints + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + return self.make_request(action='CreateGrant', + body=json.dumps(params)) + + def create_key(self, policy=None, description=None, key_usage=None): + """ + Creates a customer master key. Customer master keys can be + used to encrypt small amounts of data (less than 4K) directly, + but they are most commonly used to encrypt or envelope data + keys that are then used to encrypt customer data. For more + information about data keys, see GenerateDataKey and + GenerateDataKeyWithoutPlaintext. + + :type policy: string + :param policy: Policy to be attached to the key. This is required and + delegates back to the account. The key is the root of trust. + + :type description: string + :param description: Description of the key. We recommend that you + choose a description that helps your customer decide whether the + key is appropriate for a task. + + :type key_usage: string + :param key_usage: Specifies the intended use of the key. Currently this + defaults to ENCRYPT/DECRYPT, and only symmetric encryption and + decryption are supported. + + """ + params = {} + if policy is not None: + params['Policy'] = policy + if description is not None: + params['Description'] = description + if key_usage is not None: + params['KeyUsage'] = key_usage + return self.make_request(action='CreateKey', + body=json.dumps(params)) + + def decrypt(self, ciphertext_blob, encryption_context=None, + grant_tokens=None): + """ + Decrypts ciphertext. Ciphertext is plaintext that has been + previously encrypted by using the Encrypt function. + + :type ciphertext_blob: blob + :param ciphertext_blob: Ciphertext including metadata. + + :type encryption_context: map + :param encryption_context: The encryption context. If this was + specified in the Encrypt function, it must be specified here or the + decryption operation will fail. For more information, see + `Encryption Context`_. 
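# A sketch of creating a customer master key and then granting another
# principal use of it, per the create_key/create_grant signatures above;
# the IAM role ARN and account number are hypothetical placeholders.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
key = kms.create_key(description='example wrapping key')
# CreateKey returns its result under 'KeyMetadata' (per the KMS API).
key_id = key['KeyMetadata']['KeyId']
kms.create_grant(key_id=key_id,
                 grantee_principal='arn:aws:iam::111122223333:role/app',
                 operations=['Encrypt', 'Decrypt'])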
+ + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform decryption. + + """ + if not isinstance(ciphertext_blob, six.binary_type): + raise TypeError( + "Value of argument ``ciphertext_blob`` " + "must be of type %s." % six.binary_type) + ciphertext_blob = base64.b64encode(ciphertext_blob) + params = {'CiphertextBlob': ciphertext_blob, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Decrypt', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def delete_alias(self, alias_name): + """ + Deletes the specified alias. + + :type alias_name: string + :param alias_name: The alias to be deleted. + + """ + params = {'AliasName': alias_name, } + return self.make_request(action='DeleteAlias', + body=json.dumps(params)) + + def describe_key(self, key_id): + """ + Provides detailed information about the specified customer + master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + described. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DescribeKey', + body=json.dumps(params)) + + def disable_key(self, key_id): + """ + Marks a key as disabled, thereby preventing its use. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + disabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKey', + body=json.dumps(params)) + + def disable_key_rotation(self, key_id): + """ + Disables rotation of the specified key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be disabled. This can be an ARN, an alias, or a + globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKeyRotation', + body=json.dumps(params)) + + def enable_key(self, key_id): + """ + Marks a key as enabled, thereby permitting its use. You can + have up to 25 enabled keys at one time. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + enabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKey', + body=json.dumps(params)) + + def enable_key_rotation(self, key_id): + """ + Enables rotation of the specified customer master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be enabled. This can be an ARN, an alias, or a + globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKeyRotation', + body=json.dumps(params)) + + def encrypt(self, key_id, plaintext, encryption_context=None, + grant_tokens=None): + """ + Encrypts plaintext into ciphertext by using a customer master + key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key. This can + be an ARN, an alias, or the Key ID. + + :type plaintext: blob + :param plaintext: Data to be encrypted.
+ + :type encryption_context: map + :param encryption_context: Name:value pair that specifies the + encryption context to be used for authenticated encryption. For + more information, see `Authenticated Encryption`_. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform encryption. + + """ + if not isinstance(plaintext, six.binary_type): + raise TypeError( + "Value of argument ``plaintext`` " + "must be of type %s." % six.binary_type) + plaintext = base64.b64encode(plaintext) + params = {'KeyId': key_id, 'Plaintext': plaintext, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Encrypt', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_data_key(self, key_id, encryption_context=None, + number_of_bytes=None, key_spec=None, + grant_tokens=None): + """ + Generates a secure data key. Data keys are used to encrypt and + decrypt data. They are wrapped by customer master keys. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type encryption_context: map + :param encryption_context: Name/value pair that contains additional + data to be authenticated during the encryption and decryption + processes that use the key. This value is logged by AWS CloudTrail + to provide context around the data encrypted by the key. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. 1024 is + the current limit. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size to generate a data key for. Currently this can be AES_128 or + AES_256. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if key_spec is not None: + params['KeySpec'] = key_spec + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKey', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def generate_data_key_without_plaintext(self, key_id, + encryption_context=None, + key_spec=None, + number_of_bytes=None, + grant_tokens=None): + """ + Returns a key wrapped by a customer master key without the + plaintext copy of that key. To retrieve the plaintext, see + GenerateDataKey. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. 
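# A round-trip sketch using the encrypt/decrypt methods above. Both take
# raw bytes and handle the base64 transport encoding internally, so the
# caller never sees base64; the key alias here is a hypothetical placeholder.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
key_id = 'alias/example-app'   # an ARN, an alias, or a key id
secret = b'attack at dawn'     # must be six.binary_type (bytes)
blob = kms.encrypt(key_id=key_id, plaintext=secret)['CiphertextBlob']
recovered = kms.decrypt(ciphertext_blob=blob)['Plaintext']
assert recovered == secret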
+ + :type encryption_context: map + :param encryption_context: Name:value pair that contains additional + data to be authenticated during the encryption and decryption + processes. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size. Currently this can be AES_128 or AES_256. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if key_spec is not None: + params['KeySpec'] = key_spec + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKeyWithoutPlaintext', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_random(self, number_of_bytes=None): + """ + Generates an unpredictable byte string. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. The + current limit is 1024 bytes. + + """ + params = {} + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + response = self.make_request(action='GenerateRandom', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def get_key_policy(self, key_id, policy_name): + """ + Retrieves a policy attached to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: String that contains the name of the policy. + Currently, this must be "default". Policy names can be discovered + by calling ListKeyPolicies. + + """ + params = {'KeyId': key_id, 'PolicyName': policy_name, } + return self.make_request(action='GetKeyPolicy', + body=json.dumps(params)) + + def get_key_rotation_status(self, key_id): + """ + Retrieves a Boolean value that indicates whether key rotation + is enabled for the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='GetKeyRotationStatus', + body=json.dumps(params)) + + def list_aliases(self, limit=None, marker=None): + """ + Lists all of the key aliases in the account. + + :type limit: integer + :param limit: Specify this parameter when paginating results to + indicate the maximum number of aliases you want in each response. + If there are additional aliases beyond the maximum you specify, the + `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter when paginating results, and only in + a subsequent request after you've received a response where the + results are truncated. 
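# An envelope-encryption sketch with generate_data_key: the plaintext
# data key encrypts data locally and is then discarded, while the wrapped
# copy is stored next to the data. The local cipher is the caller's
# choice and is omitted here; the key alias is a hypothetical placeholder.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
resp = kms.generate_data_key(key_id='alias/example-app',
                             key_spec='AES_256')
local_key = resp['Plaintext']         # use for local encryption, then discard
wrapped_key = resp['CiphertextBlob']  # store alongside the encrypted data
# Later, recover the local key from the wrapped copy:
assert kms.decrypt(ciphertext_blob=wrapped_key)['Plaintext'] == local_key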
Set it to the value of the `NextMarker` + element in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListAliases', + body=json.dumps(params)) + + def list_grants(self, key_id, limit=None, marker=None): + """ + List the grants for a specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of grants you want listed in the + response. If there are additional grants beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListGrants', + body=json.dumps(params)) + + def list_key_policies(self, key_id, limit=None, marker=None): + """ + Retrieves a list of policies attached to a key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of policies you want listed in the + response. If there are additional policies beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeyPolicies', + body=json.dumps(params)) + + def list_keys(self, limit=None, marker=None): + """ + Lists the customer master keys. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of keys you want listed in the + response. If there are additional keys beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeys', + body=json.dumps(params)) + + def put_key_policy(self, key_id, policy_name, policy): + """ + Attaches a policy to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: Name of the policy to be attached. 
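# A pagination sketch for the list_* calls above, following the
# Truncated/NextMarker contract the docstrings describe; the 'Keys',
# 'Truncated' and 'NextMarker' fields follow the ListKeys response shape.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
marker = None
while True:
    page = kms.list_keys(limit=100, marker=marker)
    for key in page.get('Keys', []):
        print(key['KeyId'])
    if not page.get('Truncated'):
        break
    marker = page['NextMarker']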
Currently, the + only supported name is "default". + + :type policy: string + :param policy: The policy, in JSON format, to be attached to the key. + + """ + params = { + 'KeyId': key_id, + 'PolicyName': policy_name, + 'Policy': policy, + } + return self.make_request(action='PutKeyPolicy', + body=json.dumps(params)) + + def re_encrypt(self, ciphertext_blob, destination_key_id, + source_encryption_context=None, + destination_encryption_context=None, grant_tokens=None): + """ + Encrypts data on the server side with a new customer master + key without exposing the plaintext of the data on the client + side. The data is first decrypted and then encrypted. This + operation can also be used to change the encryption context of + a ciphertext. + + :type ciphertext_blob: blob + :param ciphertext_blob: Ciphertext of the data to re-encrypt. + + :type source_encryption_context: map + :param source_encryption_context: Encryption context used to encrypt + and decrypt the data specified in the `CiphertextBlob` parameter. + + :type destination_key_id: string + :param destination_key_id: Key identifier of the key used to re-encrypt + the data. + + :type destination_encryption_context: map + :param destination_encryption_context: Encryption context to be used + when the data is re-encrypted. + + :type grant_tokens: list + :param grant_tokens: Grant tokens that identify the grants that have + permissions for the encryption and decryption process. + + """ + if not isinstance(ciphertext_blob, six.binary_type): + raise TypeError( + "Value of argument ``ciphertext_blob`` " + "must be of type %s." % six.binary_type) + ciphertext_blob = base64.b64encode(ciphertext_blob) + params = { + 'CiphertextBlob': ciphertext_blob, + 'DestinationKeyId': destination_key_id, + } + if source_encryption_context is not None: + params['SourceEncryptionContext'] = source_encryption_context + if destination_encryption_context is not None: + params['DestinationEncryptionContext'] = destination_encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='ReEncrypt', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def retire_grant(self, grant_token): + """ + Retires a grant. You can retire a grant when you're done using + it to clean up. You should revoke a grant when you intend to + actively deny operations that depend on it. + + :type grant_token: string + :param grant_token: Token that identifies the grant to be retired. + + """ + params = {'GrantToken': grant_token, } + return self.make_request(action='RetireGrant', + body=json.dumps(params)) + + def revoke_grant(self, key_id, grant_id): + """ + Revokes a grant. You can revoke a grant to actively deny + operations that depend on it. + + :type key_id: string + :param key_id: Unique identifier of the key associated with the grant. + + :type grant_id: string + :param grant_id: Identifier of the grant to be revoked. 
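# A sketch of moving existing ciphertext to a different master key with
# re_encrypt; the plaintext never leaves the service. Both key aliases
# are hypothetical, and `blob` is ciphertext from a prior encrypt call.
import boto.kms

kms = boto.kms.connect_to_region('us-east-1')
blob = kms.encrypt(key_id='alias/old-key',
                   plaintext=b'secret')['CiphertextBlob']
rotated = kms.re_encrypt(ciphertext_blob=blob,
                         destination_key_id='alias/new-key')
new_blob = rotated['CiphertextBlob']  # store in place of the old ciphertext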
+ + """ + params = {'KeyId': key_id, 'GrantId': grant_id, } + return self.make_request(action='RevokeGrant', + body=json.dumps(params)) + + def update_key_description(self, key_id, description): + """ + + + :type key_id: string + :param key_id: + + :type description: string + :param description: + + """ + params = {'KeyId': key_id, 'Description': description, } + return self.make_request(action='UpdateKeyDescription', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff -Nru python-boto-2.34.0/boto/machinelearning/exceptions.py python-boto-2.38.0/boto/machinelearning/exceptions.py --- python-boto-2.34.0/boto/machinelearning/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/machinelearning/exceptions.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,51 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from boto.exception import BotoServerError + + +class InternalServerException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class IdempotentParameterMismatchException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class PredictorNotMountedException(BotoServerError): + pass + + +class InvalidInputException(BotoServerError): + pass diff -Nru python-boto-2.34.0/boto/machinelearning/__init__.py python-boto-2.38.0/boto/machinelearning/__init__.py --- python-boto-2.34.0/boto/machinelearning/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/machinelearning/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,42 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for Amazon Machine Learning. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.machinelearning.layer1 import MachineLearningConnection + return get_regions('machinelearning', + connection_cls=MachineLearningConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.34.0/boto/machinelearning/layer1.py python-boto-2.38.0/boto/machinelearning/layer1.py --- python-boto-2.34.0/boto/machinelearning/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/boto/machinelearning/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,1408 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json, urlsplit +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.machinelearning import exceptions + + +class MachineLearningConnection(AWSQueryConnection): + """ + Definition of the public APIs exposed by Amazon Machine Learning + """ + APIVersion = "2014-12-12" + AuthServiceName = 'machinelearning' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "machinelearning.us-east-1.amazonaws.com" + ServiceName = "MachineLearning" + TargetPrefix = "AmazonML_20141212" + ResponseError = JSONResponseError + + _faults = { + "InternalServerException": exceptions.InternalServerException, + "LimitExceededException": exceptions.LimitExceededException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "IdempotentParameterMismatchException": exceptions.IdempotentParameterMismatchException, + "PredictorNotMountedException": exceptions.PredictorNotMountedException, + "InvalidInputException": exceptions.InvalidInputException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(MachineLearningConnection, self).__init__(**kwargs) + self.region = region + self.auth_region_name = self.region.name + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_batch_prediction(self, batch_prediction_id, ml_model_id, + batch_prediction_data_source_id, output_uri, + batch_prediction_name=None): + """ + Generates predictions for a group of observations. The + observations to process exist in one or more data files + referenced by a `DataSource`. This operation creates a new + `BatchPrediction`, and uses an `MLModel` and the data files + referenced by the `DataSource` as information sources. + + `CreateBatchPrediction` is an asynchronous operation. In + response to `CreateBatchPrediction`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `BatchPrediction` + status to `PENDING`. After the `BatchPrediction` completes, + Amazon ML sets the status to `COMPLETED`. + + You can poll for status updates by using the + GetBatchPrediction operation and checking the `Status` + parameter of the result. After the `COMPLETED` status appears, + the results are available in the location specified by the + `OutputUri` parameter. + + :type batch_prediction_id: string + :param batch_prediction_id: A user-supplied ID that uniquely identifies + the `BatchPrediction`. + + :type batch_prediction_name: string + :param batch_prediction_name: A user-supplied name or description of + the `BatchPrediction`. `BatchPredictionName` can only use the UTF-8 + character set. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` that will generate + predictions for the group of observations. 
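# A connection sketch for the new Amazon ML client; connect_to_region
# resolves the endpoint, and SigV4 signing follows from the
# _required_auth_capability above. The region name is illustrative.
import boto.machinelearning

ml = boto.machinelearning.connect_to_region('us-east-1')
print('%s %s' % (ml.region.name, ml.region.endpoint))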
+ + :type batch_prediction_data_source_id: string + :param batch_prediction_data_source_id: The ID of the `DataSource` that + points to the group of observations to predict. + + :type output_uri: string + :param output_uri: The location of an Amazon Simple Storage Service + (Amazon S3) bucket or directory to store the batch prediction + results. The following substrings are not allowed in the s3 key + portion of the "outputURI" field: ':', '//', '/./', '/../'. + Amazon ML needs permissions to store and retrieve the logs on your + behalf. For information about how to set permissions, see the + `Amazon Machine Learning Developer Guide`_. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'MLModelId': ml_model_id, + 'BatchPredictionDataSourceId': batch_prediction_data_source_id, + 'OutputUri': output_uri, + } + if batch_prediction_name is not None: + params['BatchPredictionName'] = batch_prediction_name + return self.make_request(action='CreateBatchPrediction', + body=json.dumps(params)) + + def create_data_source_from_rds(self, data_source_id, rds_data, role_arn, + data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` object from an ` Amazon Relational + Database Service`_ (Amazon RDS). A `DataSource` references + data that can be used to perform CreateMLModel, + CreateEvaluation, or CreateBatchPrediction operations. + + `CreateDataSourceFromRDS` is an asynchronous operation. In + response to `CreateDataSourceFromRDS`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `DataSource` + status to `PENDING`. After the `DataSource` is created and + ready for use, Amazon ML sets the `Status` parameter to + `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status + can only be used to perform CreateMLModel, CreateEvaluation, + or CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. Typically, an Amazon Resource Name (ARN) becomes + the ID for a `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type rds_data: dict + :param rds_data: + The data specification of an Amazon RDS `DataSource`: + + + + DatabaseInformation - + + + `DatabaseName ` - Name of the Amazon RDS database. + + ` InstanceIdentifier ` - Unique identifier for the Amazon RDS + database instance. + + + DatabaseCredentials - AWS Identity and Access Management (IAM) + credentials that are used to connect to the Amazon RDS database. + + ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an + Amazon Elastic Compute Cloud (EC2) instance to carry out the copy + task from Amazon RDS to Amazon S3. For more information, see `Role + templates`_ for data pipelines. + + ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data + Pipeline service to monitor the progress of the copy task from + Amazon RDS to Amazon Simple Storage Service (S3). For more + information, see `Role templates`_ for data pipelines. + + SecurityInfo - Security information to use to access an Amazon RDS + instance. You need to set up appropriate ingress rules for the + security entity IDs provided to allow access to the Amazon RDS + instance.
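# A sketch of kicking off the asynchronous batch prediction described
# above; all IDs and the S3 URI are hypothetical placeholders.
import boto.machinelearning

ml = boto.machinelearning.connect_to_region('us-east-1')
ml.create_batch_prediction(
    batch_prediction_id='bp-2015-04-example',
    ml_model_id='ml-model-example',
    batch_prediction_data_source_id='ds-unlabeled-example',
    output_uri='s3://example-bucket/predictions/',
    batch_prediction_name='example nightly scoring')
# The call returns immediately with status PENDING; poll
# GetBatchPrediction (not shown in this hunk) until COMPLETED.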
Specify a [ `SubnetId`, `SecurityGroupIds`] pair for a + VPC-based Amazon RDS instance. + + SelectSqlQuery - Query that is used to retrieve the observation data + for the `Datasource`. + + S3StagingLocation - Amazon S3 location for staging RDS data. The data + retrieved from Amazon RDS using `SelectSqlQuery` is stored in this + location. + + DataSchemaUri - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- + random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type role_arn: string + :param role_arn: The role that Amazon ML assumes on behalf of the user + to create and activate a data pipeline in the user's account and + copy data (using the `SelectSqlQuery` query) from Amazon RDS to + Amazon S3. + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + an `MLModel` training. This parameter must be set to `True` if the + ``DataSource `` needs to be used for `MLModel` training. + + """ + params = { + 'DataSourceId': data_source_id, + 'RDSData': rds_data, + 'RoleARN': role_arn, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromRDS', + body=json.dumps(params)) + + def create_data_source_from_redshift(self, data_source_id, data_spec, + role_arn, data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` from `Amazon Redshift`_. A `DataSource` + references data that can be used to perform either + CreateMLModel, CreateEvaluation or CreateBatchPrediction + operations. + + `CreateDataSourceFromRedshift` is an asynchronous operation. + In response to `CreateDataSourceFromRedshift`, Amazon Machine + Learning (Amazon ML) immediately returns and sets the + `DataSource` status to `PENDING`. After the `DataSource` is + created and ready for use, Amazon ML sets the `Status` + parameter to `COMPLETED`. `DataSource` in `COMPLETED` or + `PENDING` status can only be used to perform CreateMLModel, + CreateEvaluation, or CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + The observations should exist in the database hosted on an + Amazon Redshift cluster and should be specified by a + `SelectSqlQuery`. Amazon ML executes the ` Unload`_ command in + Amazon Redshift to transfer the result set of `SelectSqlQuery` + to `S3StagingLocation.` + + After the `DataSource` is created, it's ready for use in + evaluations and batch predictions. If you plan to use the + `DataSource` to train an `MLModel`, the `DataSource` requires + another item -- a recipe. A recipe describes the observation + variables that participate in training an `MLModel`. A recipe + describes how each input variable will be used in training. + Will the variable be included or excluded from training? Will + the variable be manipulated, for example, combined with + another variable or split apart into word combinations?
The + recipe provides answers to these questions. For more + information, see the Amazon Machine Learning Developer Guide. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type data_spec: dict + :param data_spec: + The data specification of an Amazon Redshift `DataSource`: + + + + DatabaseInformation - + + + `DatabaseName ` - Name of the Amazon Redshift database. + + ` ClusterIdentifier ` - Unique ID for the Amazon Redshift cluster. + + + DatabaseCredentials - AWS Identity and Access Management (IAM) + credentials that are used to connect to the Amazon Redshift + database. + + SelectSqlQuery - Query that is used to retrieve the observation data + for the `Datasource`. + + S3StagingLocation - Amazon Simple Storage Service (Amazon S3) + location for staging Amazon Redshift data. The data retrieved from + Amazon Redshift using + `SelectSqlQuery` is stored in this location. + + DataSchemaUri - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- + random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type role_arn: string + :param role_arn: A fully specified role Amazon Resource Name (ARN). + Amazon ML assumes the role on behalf of the user to create the + following: + + + + A security group to allow Amazon ML to execute the `SelectSqlQuery` + query on an Amazon Redshift cluster + + An Amazon S3 bucket policy to grant Amazon ML read/write permissions + on the `S3StagingLocation` + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + `MLModel` training. This parameter must be set to `True` if the + ``DataSource `` needs to be used for `MLModel` training. + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSpec': data_spec, + 'RoleARN': role_arn, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromRedshift', + body=json.dumps(params)) + + def create_data_source_from_s3(self, data_source_id, data_spec, + data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` object. A `DataSource` references data + that can be used to perform CreateMLModel, CreateEvaluation, + or CreateBatchPrediction operations. + + `CreateDataSourceFromS3` is an asynchronous operation. In + response to `CreateDataSourceFromS3`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `DataSource` + status to `PENDING`. After the `DataSource` is created and + ready for use, Amazon ML sets the `Status` parameter to + `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status + can only be used to perform CreateMLModel, CreateEvaluation or + CreateBatchPrediction operations.
+ + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + The observation data used in a `DataSource` should be ready to + use; that is, it should have a consistent structure, and + missing data values should be kept to a minimum. The + observation data must reside in one or more CSV files in an + Amazon Simple Storage Service (Amazon S3) bucket, along with a + schema that describes the data items by name and type. The + same schema must be used for all of the data files referenced + by the `DataSource`. + + After the `DataSource` has been created, it's ready to use in + evaluations and batch predictions. If you plan to use the + `DataSource` to train an `MLModel`, the `DataSource` requires + another item: a recipe. A recipe describes the observation + variables that participate in training an `MLModel`. A recipe + describes how each input variable will be used in training. + Will the variable be included or excluded from training? Will + the variable be manipulated, for example, combined with + another variable, or split apart into word combinations? The + recipe provides answers to these questions. For more + information, see the `Amazon Machine Learning Developer + Guide`_. + + :type data_source_id: string + :param data_source_id: A user-supplied identifier that uniquely + identifies the `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type data_spec: dict + :param data_spec: + The data specification of a `DataSource`: + + + + DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location + of the observation data. + + DataSchemaLocationS3 - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- + random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + an `MLModel` training. This parameter must be set to `True` if the + ``DataSource `` needs to be used for `MLModel` training + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSpec': data_spec, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromS3', + body=json.dumps(params)) + + def create_evaluation(self, evaluation_id, ml_model_id, + evaluation_data_source_id, evaluation_name=None): + """ + Creates a new `Evaluation` of an `MLModel`. An `MLModel` is + evaluated on a set of observations associated to a + `DataSource`. Like a `DataSource` for an `MLModel`, the + `DataSource` for an `Evaluation` contains values for the + Target Variable. The `Evaluation` compares the predicted + result for each observation to the actual outcome and provides + a summary so that you know how effective the `MLModel` + functions on the test data. 
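# A sketch of the S3 data source creation just described; the bucket,
# schema location, and IDs are hypothetical. ComputeStatistics is set
# because the docstring requires it for DataSources used in MLModel
# training.
import boto.machinelearning

ml = boto.machinelearning.connect_to_region('us-east-1')
ml.create_data_source_from_s3(
    data_source_id='ds-training-example',
    data_spec={
        'DataLocationS3': 's3://example-bucket/training.csv',
        'DataSchemaLocationS3': 's3://example-bucket/training.csv.schema',
        'DataRearrangement':
            '{"randomSeed":"some-random-seed",'
            '"splitting":{"percentBegin":0,"percentEnd":70}}',
    },
    data_source_name='example training data',
    compute_statistics=True)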
Evaluation generates a relevant + performance metric such as BinaryAUC, RegressionRMSE or + MulticlassAvgFScore based on the corresponding `MLModelType`: + `BINARY`, `REGRESSION` or `MULTICLASS`. + + `CreateEvaluation` is an asynchronous operation. In response + to `CreateEvaluation`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the evaluation status to + `PENDING`. After the `Evaluation` is created and ready for + use, Amazon ML sets the status to `COMPLETED`. + + You can use the GetEvaluation operation to check progress of + the evaluation during the creation operation. + + :type evaluation_id: string + :param evaluation_id: A user-supplied ID that uniquely identifies the + `Evaluation`. + + :type evaluation_name: string + :param evaluation_name: A user-supplied name or description of the + `Evaluation`. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` to evaluate. + The schema used in creating the `MLModel` must match the schema of the + `DataSource` used in the `Evaluation`. + + :type evaluation_data_source_id: string + :param evaluation_data_source_id: The ID of the `DataSource` for the + evaluation. The schema of the `DataSource` must match the schema + used to create the `MLModel`. + + """ + params = { + 'EvaluationId': evaluation_id, + 'MLModelId': ml_model_id, + 'EvaluationDataSourceId': evaluation_data_source_id, + } + if evaluation_name is not None: + params['EvaluationName'] = evaluation_name + return self.make_request(action='CreateEvaluation', + body=json.dumps(params)) + + def create_ml_model(self, ml_model_id, ml_model_type, + training_data_source_id, ml_model_name=None, + parameters=None, recipe=None, recipe_uri=None): + """ + Creates a new `MLModel` using the data files and the recipe as + information sources. + + An `MLModel` is nearly immutable. Users can only update the + `MLModelName` and the `ScoreThreshold` in an `MLModel` without + creating a new `MLModel`. + + `CreateMLModel` is an asynchronous operation. In response to + `CreateMLModel`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the `MLModel` status to + `PENDING`. After the `MLModel` is created and ready for use, + Amazon ML sets the status to `COMPLETED`. + + You can use the GetMLModel operation to check progress of the + `MLModel` during the creation operation. + + CreateMLModel requires a `DataSource` with computed + statistics, which can be created by setting + `ComputeStatistics` to `True` in CreateDataSourceFromRDS, + CreateDataSourceFromS3, or CreateDataSourceFromRedshift + operations. + + :type ml_model_id: string + :param ml_model_id: A user-supplied ID that uniquely identifies the + `MLModel`. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type ml_model_type: string + :param ml_model_type: The category of supervised learning that this + `MLModel` will address. Choose from the following types: + + + Choose `REGRESSION` if the `MLModel` will be used to predict a + numeric value. + + Choose `BINARY` if the `MLModel` result has two possible values. + + Choose `MULTICLASS` if the `MLModel` result has a limited number of + values. + + + For more information, see the `Amazon Machine Learning Developer + Guide`_. + + :type parameters: map + :param parameters: + A list of the training parameters in the `MLModel`. The list is + implemented as a map of key/value pairs. 
+ + The following is the current set of training parameters: + + + + `sgd.l1RegularizationAmount` - Coefficient regularization L1 norm. It + controls overfitting the data by penalizing large coefficients. + This tends to drive coefficients to zero, resulting in a sparse + feature set. If you use this parameter, start by specifying a small + value such as 1.0E-08. The value is a double that ranges from 0 to + MAX_DOUBLE. The default is not to use L1 normalization. The + parameter cannot be used when `L2` is specified. Use this parameter + sparingly. + + `sgd.l2RegularizationAmount` - Coefficient regularization L2 norm. It + controls overfitting the data by penalizing large coefficients. + This tends to drive coefficients to small, nonzero values. If you + use this parameter, start by specifying a small value such as + 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE. + The default is not to use L2 normalization. This cannot be used + when `L1` is specified. Use this parameter sparingly. + + `sgd.maxPasses` - Number of times that the training process traverses + the observations to build the `MLModel`. The value is an integer + that ranges from 1 to 10000. The default value is 10. + + `sgd.maxMLModelSizeInBytes` - Maximum allowed size of the model. + Depending on the input data, the size of the model might affect its + performance. The value is an integer that ranges from 100000 to + 2147483648. The default value is 33554432. + + :type training_data_source_id: string + :param training_data_source_id: The `DataSource` that points to the + training data. + + :type recipe: string + :param recipe: The data recipe for creating `MLModel`. You must specify + either the recipe or its URI. If you don't specify a recipe or its + URI, Amazon ML creates a default. + + :type recipe_uri: string + :param recipe_uri: The Amazon Simple Storage Service (Amazon S3) + location and file name that contains the `MLModel` recipe. You must + specify either the recipe or its URI. If you don't specify a recipe + or its URI, Amazon ML creates a default. + + """ + params = { + 'MLModelId': ml_model_id, + 'MLModelType': ml_model_type, + 'TrainingDataSourceId': training_data_source_id, + } + if ml_model_name is not None: + params['MLModelName'] = ml_model_name + if parameters is not None: + params['Parameters'] = parameters + if recipe is not None: + params['Recipe'] = recipe + if recipe_uri is not None: + params['RecipeUri'] = recipe_uri + return self.make_request(action='CreateMLModel', + body=json.dumps(params)) + + def create_realtime_endpoint(self, ml_model_id): + """ + Creates a real-time endpoint for the `MLModel`. The endpoint + contains the URI of the `MLModel`; that is, the location to + send real-time prediction requests for the specified + `MLModel`. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='CreateRealtimeEndpoint', + body=json.dumps(params)) + + def delete_batch_prediction(self, batch_prediction_id): + """ + Assigns the DELETED status to a `BatchPrediction`, rendering + it unusable. + + After using the `DeleteBatchPrediction` operation, you can use + the GetBatchPrediction operation to verify that the status of + the `BatchPrediction` changed to DELETED. + + The result of the `DeleteBatchPrediction` operation is + irreversible.
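# A sketch of training a binary model on the data source above, with two
# of the sgd.* training parameters the docstring lists; all IDs and
# parameter values are illustrative.
import boto.machinelearning

ml = boto.machinelearning.connect_to_region('us-east-1')
ml.create_ml_model(
    ml_model_id='ml-model-example',
    ml_model_type='BINARY',
    training_data_source_id='ds-training-example',
    ml_model_name='example click model',
    parameters={
        'sgd.maxPasses': '10',
        'sgd.l2RegularizationAmount': '1.0E-08',
    })
# Like the other create_* calls, this returns while the MLModel is
# PENDING; a recipe is optional, and Amazon ML creates a default one.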
+ + :type batch_prediction_id: string + :param batch_prediction_id: A user-supplied ID that uniquely identifies + the `BatchPrediction`. + + """ + params = {'BatchPredictionId': batch_prediction_id, } + return self.make_request(action='DeleteBatchPrediction', + body=json.dumps(params)) + + def delete_data_source(self, data_source_id): + """ + Assigns the DELETED status to a `DataSource`, rendering it + unusable. + + After using the `DeleteDataSource` operation, you can use the + GetDataSource operation to verify that the status of the + `DataSource` changed to DELETED. + + The results of the `DeleteDataSource` operation are + irreversible. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. + + """ + params = {'DataSourceId': data_source_id, } + return self.make_request(action='DeleteDataSource', + body=json.dumps(params)) + + def delete_evaluation(self, evaluation_id): + """ + Assigns the `DELETED` status to an `Evaluation`, rendering it + unusable. + + After invoking the `DeleteEvaluation` operation, you can use + the GetEvaluation operation to verify that the status of the + `Evaluation` changed to `DELETED`. + + The results of the `DeleteEvaluation` operation are + irreversible. + + :type evaluation_id: string + :param evaluation_id: A user-supplied ID that uniquely identifies the + `Evaluation` to delete. + + """ + params = {'EvaluationId': evaluation_id, } + return self.make_request(action='DeleteEvaluation', + body=json.dumps(params)) + + def delete_ml_model(self, ml_model_id): + """ + Assigns the DELETED status to an `MLModel`, rendering it + unusable. + + After using the `DeleteMLModel` operation, you can use the + GetMLModel operation to verify that the status of the + `MLModel` changed to DELETED. + + The result of the `DeleteMLModel` operation is irreversible. + + :type ml_model_id: string + :param ml_model_id: A user-supplied ID that uniquely identifies the + `MLModel`. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='DeleteMLModel', + body=json.dumps(params)) + + def delete_realtime_endpoint(self, ml_model_id): + """ + Deletes a real time endpoint of an `MLModel`. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='DeleteRealtimeEndpoint', + body=json.dumps(params)) + + def describe_batch_predictions(self, filter_variable=None, eq=None, + gt=None, lt=None, ge=None, le=None, + ne=None, prefix=None, sort_order=None, + next_token=None, limit=None): + """ + Returns a list of `BatchPrediction` operations that match the + search criteria in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of + `BatchPrediction`: + + + + `CreatedAt` - Sets the search criteria to the `BatchPrediction` + creation date. + + `Status` - Sets the search criteria to the `BatchPrediction` status. + + `Name` - Sets the search criteria to the contents of the + `BatchPrediction` ** ** `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `BatchPrediction` creation. + + `MLModelId` - Sets the search criteria to the `MLModel` used in the + `BatchPrediction`. + + `DataSourceId` - Sets the search criteria to the `DataSource` used in + the `BatchPrediction`. + + `DataURI` - Sets the search criteria to the data file(s) used in the + `BatchPrediction`. 
The URL can identify either a file or an Amazon + Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `BatchPrediction` results will + have `FilterVariable` values that exactly match the value specified + with `EQ`. + + :type gt: string + :param gt: The greater than operator. The `BatchPrediction` results + will have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `BatchPrediction` results will + have `FilterVariable` values that are less than the value specified + with `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are less than or + equal to the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `BatchPrediction` results + will have `FilterVariable` values not equal to the value specified + with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, a `Batch Prediction` operation could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this + `BatchPrediction`, select `Name` for the `FilterVariable` and any + of the following strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`s. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: An ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeBatchPredictions', + body=json.dumps(params)) + + def describe_data_sources(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, + prefix=None, sort_order=None, next_token=None, + limit=None): + """ + Returns a list of `DataSource` that match the search criteria + in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `DataSource`: + + + + `CreatedAt` - Sets the search criteria to `DataSource` creation + dates. + + `Status` - Sets the search criteria to `DataSource` statuses. + + `Name` - Sets the search criteria to the contents of `DataSource` + `Name`. 
+ + `DataUri` - Sets the search criteria to the URI of data files used to + create the `DataSource`. The URI can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `DataSource` creation. + + :type eq: string + :param eq: The equal to operator. The `DataSource` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `DataSource` results will + have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `DataSource` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `DataSource` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `DataSource` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `DataSource` results will + have `FilterVariable` values not equal to the value specified with + `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, a `DataSource` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `DataSource`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The maximum number of `DataSource` to include in the + result. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeDataSources', + body=json.dumps(params)) + + def describe_evaluations(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, prefix=None, + sort_order=None, next_token=None, limit=None): + """ + Returns a list of `Evaluation` that match the search + criteria in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `Evaluation` + objects: + + + + `CreatedAt` - Sets the search criteria to the `Evaluation` creation + date. + + `Status` - Sets the search criteria to the `Evaluation` status. 
+ + `Name` - Sets the search criteria to the contents of `Evaluation` + `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + an `Evaluation`. + + `MLModelId` - Sets the search criteria to the `MLModel` that was + evaluated. + + `DataSourceId` - Sets the search criteria to the `DataSource` used in + `Evaluation`. + + `DataUri` - Sets the search criteria to the data file(s) used in + `Evaluation`. The URL can identify either a file or an Amazon + Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `Evaluation` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `Evaluation` results will + have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `Evaluation` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `Evaluation` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `Evaluation` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `Evaluation` results will + have `FilterVariable` values not equal to the value specified with + `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `Evaluation` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `Evaluation`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The maximum number of `Evaluation` to include in the + result. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeEvaluations', + body=json.dumps(params)) + + def describe_ml_models(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, prefix=None, + sort_order=None, next_token=None, limit=None): + """ + Returns a list of `MLModel` that match the search criteria in + the request. 
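All of the describe_* calls share this filter and pagination contract. A minimal paging sketch (the `Results` and `NextToken` response keys follow the Amazon ML API shape and are assumptions here):

    # Page through all completed batch predictions, newest first.
    token = None
    while True:
        page = conn.describe_batch_predictions(
            filter_variable='Status', eq='COMPLETED',
            sort_order='dsc', limit=100, next_token=token)
        for bp in page.get('Results', []):
            print(bp.get('BatchPredictionId'))
        token = page.get('NextToken')
        if token is None:
            break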
+ + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `MLModel`: + + + + `CreatedAt` - Sets the search criteria to `MLModel` creation date. + + `Status` - Sets the search criteria to `MLModel` status. + + `Name` - Sets the search criteria to the contents of `MLModel` + `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `MLModel` creation. + + `TrainingDataSourceId` - Sets the search criteria to the `DataSource` + used to train one or more `MLModel`. + + `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel` + real-time endpoint status. + + `MLModelType` - Sets the search criteria to `MLModel` type: binary, + regression, or multi-class. + + `Algorithm` - Sets the search criteria to the algorithm that the + `MLModel` uses. + + `TrainingDataURI` - Sets the search criteria to the data file(s) used + in training an `MLModel`. The URL can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `MLModel` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `MLModel` results will have + `FilterVariable` values that are greater than the value specified + with `GT`. + + :type lt: string + :param lt: The less than operator. The `MLModel` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are greater than or equal to + the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `MLModel` results will have + `FilterVariable` values not equal to the value specified with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `MLModel` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. 
+ + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeMLModels', + body=json.dumps(params)) + + def get_batch_prediction(self, batch_prediction_id): + """ + Returns a `BatchPrediction` that includes detailed metadata, + status, and data file information for a `Batch Prediction` + request. + + :type batch_prediction_id: string + :param batch_prediction_id: An ID assigned to the `BatchPrediction` at + creation. + + """ + params = {'BatchPredictionId': batch_prediction_id, } + return self.make_request(action='GetBatchPrediction', + body=json.dumps(params)) + + def get_data_source(self, data_source_id, verbose=None): + """ + Returns a `DataSource` that includes metadata and data file + information, as well as the current status of the + `DataSource`. + + `GetDataSource` provides results in normal or verbose format. + The verbose format adds the schema description and the list of + files pointed to by the DataSource to the normal format. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetDataSource` operation should + return `DataSourceSchema`. + If true, `DataSourceSchema` is returned. + + If false, `DataSourceSchema` is not returned. + + """ + params = {'DataSourceId': data_source_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetDataSource', + body=json.dumps(params)) + + def get_evaluation(self, evaluation_id): + """ + Returns an `Evaluation` that includes metadata as well as the + current status of the `Evaluation`. + + :type evaluation_id: string + :param evaluation_id: The ID of the `Evaluation` to retrieve. The + evaluation of each `MLModel` is recorded and cataloged. The ID + provides the means to access the information. + + """ + params = {'EvaluationId': evaluation_id, } + return self.make_request(action='GetEvaluation', + body=json.dumps(params)) + + def get_ml_model(self, ml_model_id, verbose=None): + """ + Returns an `MLModel` that includes detailed metadata, and data + source information as well as the current status of the + `MLModel`. + + `GetMLModel` provides results in normal or verbose format. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetMLModel` operation should + return `Recipe`. + If true, `Recipe` is returned. + + If false, `Recipe` is not returned. + + """ + params = {'MLModelId': ml_model_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetMLModel', + body=json.dumps(params)) + + def predict(self, ml_model_id, record, predict_endpoint): + """ + Generates a prediction for the observation using the specified + `MLModel`. + + + Not all response parameters will be populated because this is + dependent on the type of requested model. 
+ + :type ml_model_id: string + :param ml_model_id: A unique identifier of the `MLModel`. + + :type record: map + :param record: A map of variable name-value pairs that represent an + observation. + + :type predict_endpoint: string + :param predict_endpoint: The endpoint to send the predict request to. + + """ + predict_host = urlsplit(predict_endpoint).hostname + if predict_host is None: + predict_host = predict_endpoint + + params = { + 'MLModelId': ml_model_id, + 'Record': record, + 'PredictEndpoint': predict_host, + } + return self.make_request(action='Predict', + body=json.dumps(params), + host=predict_host) + + def update_batch_prediction(self, batch_prediction_id, + batch_prediction_name): + """ + Updates the `BatchPredictionName` of a `BatchPrediction`. + + You can use the GetBatchPrediction operation to view the + contents of the updated data element. + + :type batch_prediction_id: string + :param batch_prediction_id: The ID assigned to the `BatchPrediction` + during creation. + + :type batch_prediction_name: string + :param batch_prediction_name: A new user-supplied name or description + of the `BatchPrediction`. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'BatchPredictionName': batch_prediction_name, + } + return self.make_request(action='UpdateBatchPrediction', + body=json.dumps(params)) + + def update_data_source(self, data_source_id, data_source_name): + """ + Updates the `DataSourceName` of a `DataSource`. + + You can use the GetDataSource operation to view the contents + of the updated data element. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` during + creation. + + :type data_source_name: string + :param data_source_name: A new user-supplied name or description of the + `DataSource` that will replace the current description. + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSourceName': data_source_name, + } + return self.make_request(action='UpdateDataSource', + body=json.dumps(params)) + + def update_evaluation(self, evaluation_id, evaluation_name): + """ + Updates the `EvaluationName` of an `Evaluation`. + + You can use the GetEvaluation operation to view the contents + of the updated data element. + + :type evaluation_id: string + :param evaluation_id: The ID assigned to the `Evaluation` during + creation. + + :type evaluation_name: string + :param evaluation_name: A new user-supplied name or description of the + `Evaluation` that will replace the current content. + + """ + params = { + 'EvaluationId': evaluation_id, + 'EvaluationName': evaluation_name, + } + return self.make_request(action='UpdateEvaluation', + body=json.dumps(params)) + + def update_ml_model(self, ml_model_id, ml_model_name=None, + score_threshold=None): + """ + Updates the `MLModelName` and the `ScoreThreshold` of an + `MLModel`. + + You can use the GetMLModel operation to view the contents of + the updated data element. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type score_threshold: float + :param score_threshold: The `ScoreThreshold` used in binary + classification `MLModel` that marks the boundary between a positive + prediction and a negative prediction. + Output values greater than or equal to the `ScoreThreshold` receive a + positive result from the `MLModel`, such as `True`. 
Output values + less than the `ScoreThreshold` receive a negative response from the + `MLModel`, such as `False`. + + """ + params = {'MLModelId': ml_model_id, } + if ml_model_name is not None: + params['MLModelName'] = ml_model_name + if score_threshold is not None: + params['ScoreThreshold'] = score_threshold + return self.make_request(action='UpdateMLModel', + body=json.dumps(params)) + + def make_request(self, action, body, host=None): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request_kwargs = { + 'method':'POST', 'path':'/', 'auth_path':'/', 'params':{}, + 'headers': headers, 'data':body + } + if host is not None: + headers['Host'] = host + http_request_kwargs['host'] = host + http_request = self.build_base_http_request(**http_request_kwargs) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff -Nru python-boto-2.34.0/boto/mws/connection.py python-boto-2.38.0/boto/mws/connection.py --- python-boto-2.34.0/boto/mws/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/mws/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -292,7 +292,7 @@ return path splat = path.split('/') splat[-2] += '_Sandbox' - return splat.join('/') + return '/'.join(splat) def _required_auth_capability(self): return ['mws'] diff -Nru python-boto-2.34.0/boto/opsworks/layer1.py python-boto-2.38.0/boto/opsworks/layer1.py --- python-boto-2.34.0/boto/opsworks/layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/opsworks/layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -65,12 +65,18 @@ endpoint. You can then use the API to direct AWS OpsWorks to create stacks in any AWS Region. - **Chef Version** + **Chef Versions** When you call CreateStack, CloneStack, or UpdateStack we recommend you use the `ConfigurationManager` parameter to specify the Chef - version, 0.9, 11.4, or 11.10. The default value is currently 11.4. - For more information, see `Chef Versions`_. + version, 0.9, 11.4, or 11.10. The default value is currently + 11.10. For more information, see `Chef Versions`_. + + You can still specify Chef 0.9 for your stack, but new features + are not available for Chef 0.9 stacks, and support is scheduled to + end on July 24, 2014. We do not recommend using Chef 0.9 for new + stacks, and we recommend migrating your existing Chef 0.9 stacks + to Chef 11.10 as soon as possible. """ APIVersion = "2013-02-18" DefaultRegionName = "us-east-1" @@ -100,6 +106,33 @@ def _required_auth_capability(self): return ['hmac-v4'] + def assign_instance(self, instance_id, layer_ids): + """ + Assign a registered instance to a custom layer. You cannot use + this action with instances that were created with AWS + OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. 
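For example, assigning a previously registered instance to a custom layer might look like this (connection helper from boto.opsworks; IDs hypothetical):

    import boto.opsworks

    ops = boto.opsworks.connect_to_region('us-east-1')
    ops.assign_instance(
        instance_id='2f4a6c1e-example',
        layer_ids=['b7e5d8c2-example'])  # must identify a custom layer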
+ + :type instance_id: string + :param instance_id: The instance ID. + + :type layer_ids: list + :param layer_ids: The layer ID, which must correspond to a custom + layer. You cannot assign a registered instance to a built-in layer. + + """ + params = { + 'InstanceId': instance_id, + 'LayerIds': layer_ids, + } + return self.make_request(action='AssignInstance', + body=json.dumps(params)) + def assign_volume(self, volume_id, instance_id=None): """ Assigns one of the stack's registered Amazon EBS volumes to a @@ -159,6 +192,13 @@ specified layer. For more information, see `Elastic Load Balancing`_. + + You must create the Elastic Load Balancing instance + separately, by using the Elastic Load Balancing console, API, + or CLI. For more information, see ` Elastic Load Balancing + Developer Guide`_. + + **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more @@ -214,7 +254,7 @@ :type vpc_id: string :param vpc_id: The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances - will be launched into this VPC, and you cannot change the ID later. + are launched into this VPC, and you cannot change the ID later. + If your account supports EC2 Classic, the default value is no VPC. + If your account does not support EC2 Classic, the default value is @@ -246,14 +286,20 @@ pairs to be added to the cloned stack. :type service_role_arn: string - :param service_role_arn: The stack AWS Identity and Access Management - (IAM) role, which allows AWS OpsWorks to work with AWS resources on - your behalf. You must set this parameter to the Amazon Resource - Name (ARN) for an existing IAM role. If you create a stack by using - the AWS OpsWorks console, it creates the role for you. You can - obtain an existing stack's IAM ARN programmatically by calling - DescribePermissions. For more information about IAM ARNs, see - `Using Identifiers`_. + :param service_role_arn: + The stack AWS Identity and Access Management (IAM) role, which allows + AWS OpsWorks to work with AWS resources on your behalf. You must + set this parameter to the Amazon Resource Name (ARN) for an + existing IAM role. If you create a stack by using the AWS OpsWorks + console, it creates the role for you. You can obtain an existing + stack's IAM ARN programmatically by calling DescribePermissions. + For more information about IAM ARNs, see `Using Identifiers`_. + + + You must set this parameter to a valid service role ARN or the action + will fail; there is no default value. You can specify the source + stack's service role ARN, if you prefer, but you must do so + explicitly. :type default_instance_profile_arn: string :param default_instance_profile_arn: The ARN of an IAM profile that is @@ -261,9 +307,16 @@ information about IAM ARNs, see `Using Identifiers`_. :type default_os: string - :param default_os: The cloned stack's default operating system, which - must be set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default - option is `Amazon Linux`. + :param default_os: The stack's operating system, which must be set to + one of the following. + + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + + Custom AMIs: `Custom`. You specify the custom AMI you want to use + when you create instances. + + + The default option is the current Amazon Linux version. 
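Because there is no default service role, a clone call has to name one explicitly; a sketch with hypothetical IDs and ARN (keyword names per the docstring above):

    ops.clone_stack(
        source_stack_id='stack-id-example',
        service_role_arn='arn:aws:iam::123456789012:role/aws-opsworks-service-role',
        default_os='Amazon Linux 2014.09')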
:type hostname_theme: string :param hostname_theme: The stack's host name theme, with spaces are @@ -296,12 +349,13 @@ For more information, see the `VpcId` parameter description. :type default_subnet_id: string - :param default_subnet_id: The stack's default subnet ID. All instances - will be launched into this subnet unless you specify otherwise when - you create the instance. If you also specify a value for - `DefaultAvailabilityZone`, the subnet must be in the same zone. For - information on default values and when this parameter is required, - see the `VpcId` parameter description. + :param default_subnet_id: The stack's default VPC subnet ID. This + parameter is required if you specify a value for the `VpcId` + parameter. All instances are launched into this subnet unless you + specify otherwise when you create the instance. If you also specify + a value for `DefaultAvailabilityZone`, the subnet must be in that + zone. For information on default values and when this parameter is + required, see the `VpcId` parameter description. :type custom_json: string :param custom_json: A string that contains user-defined, custom JSON. @@ -424,7 +478,7 @@ def create_app(self, stack_id, name, type, shortname=None, description=None, data_sources=None, app_source=None, domains=None, enable_ssl=None, ssl_configuration=None, - attributes=None): + attributes=None, environment=None): """ Creates an app for a specified stack. For more information, see `Creating Apps`_. @@ -474,6 +528,17 @@ :param attributes: One or more user-defined key/value pairs to be added to the stack attributes. + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instance. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + """ params = {'StackId': stack_id, 'Name': name, 'Type': type, } if shortname is not None: @@ -492,24 +557,16 @@ params['SslConfiguration'] = ssl_configuration if attributes is not None: params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment return self.make_request(action='CreateApp', body=json.dumps(params)) def create_deployment(self, stack_id, command, app_id=None, instance_ids=None, comment=None, custom_json=None): """ - Deploys a stack or app. - - - + App deployment generates a `deploy` event, which runs the - associated recipes and passes them a JSON stack configuration - object that includes information about the app. - + Stack deployment runs the `deploy` recipes but does not - raise an event. - - - For more information, see `Deploying Apps`_ and `Run Stack - Commands`_. + Runs deployment or stack commands. For more information, see + `Deploying Apps`_ and `Run Stack Commands`_. **Required Permissions**: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an @@ -588,43 +645,36 @@ in the API Name column of the Available Instance Types table. :type auto_scaling_type: string - :param auto_scaling_type: - The instance auto scaling type, which has three possible values: - - - + **AlwaysRunning**: A 24/7 instance, which is not affected by auto - scaling. 
- + **TimeBasedAutoScaling**: A time-based auto scaling instance, which - is started and stopped based on a specified schedule. To specify - the schedule, call SetTimeBasedAutoScaling. - + **LoadBasedAutoScaling**: A load-based auto scaling instance, which - is started and stopped based on load metrics. To use load-based - auto scaling, you must enable it for the instance layer and - configure the thresholds by calling SetLoadBasedAutoScaling. + :param auto_scaling_type: For load-based or time-based instances, the + type. :type hostname: string :param hostname: The instance host name. :type os: string - :param os: The instance operating system, which must be set to one of + :param os: The instance's operating system, which must be set to one of the following. - + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS` + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + Custom AMIs: `Custom` - The default option is `Amazon Linux`. If you set this parameter to - `Custom`, you must use the CreateInstance action's AmiId parameter - to specify the custom AMI that you want to use. For more - information on the standard operating systems, see `Operating + The default option is the current Amazon Linux version. If you set this + parameter to `Custom`, you must use the CreateInstance action's + AmiId parameter to specify the custom AMI that you want to use. For + more information on the standard operating systems, see `Operating Systems`_For more information on how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_. :type ami_id: string - :param ami_id: A custom AMI ID to be used to create the instance. The - AMI should be based on one of the standard AWS OpsWorks APIs: - Amazon Linux or Ubuntu 12.04 LTS. For more information, see - `Instances`_ + :param ami_id: + A custom AMI ID to be used to create the instance. The AMI should be + based on one of the standard AWS OpsWorks AMIs: Amazon Linux, + Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see + `Instances`_. + + If you specify a custom AMI, you must set `Os` to `Custom`. :type ssh_key_name: string :param ssh_key_name: The instance SSH key name. @@ -655,13 +705,17 @@ information, see `Storage for the Root Device`_. :type install_updates_on_boot: boolean - :param install_updates_on_boot: Whether to install operating system and - package updates when the instance boots. The default value is - `True`. To control when updates are installed, set this value to - `False`. You must then update your instances manually by using - CreateDeployment to run the `update_dependencies` stack command or - manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the - instances. + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances. + + + We strongly recommend using the default value of `True` to ensure that + your instances have the latest security updates. 
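Tying these parameters together, a minimal create_instance call that keeps the recommended update behavior (IDs hypothetical):

    ops.create_instance(
        stack_id='stack-id-example',
        layer_ids=['layer-id-example'],
        instance_type='c3.large',
        os='Amazon Linux 2014.09',
        install_updates_on_boot=True)  # the default, shown explicitly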
:type ebs_optimized: boolean :param ebs_optimized: Whether to create an Amazon EBS-optimized @@ -707,11 +761,22 @@ auto_assign_elastic_ips=None, auto_assign_public_ips=None, custom_recipes=None, install_updates_on_boot=None, - use_ebs_optimized_instances=None): + use_ebs_optimized_instances=None, + lifecycle_event_configuration=None): """ Creates a layer. For more information, see `How to Create a Layer`_. + + You should use **CreateLayer** for noncustom layer types such + as PHP App Server only if the stack does not have an existing + layer of that type. A stack can have at most one instance of + each noncustom layer; if you attempt to create a second + instance, **CreateLayer** fails. A stack can have an arbitrary + number of custom layers, so you can call **CreateLayer** as + many times as you like for that layer type. + + **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more @@ -722,22 +787,8 @@ :param stack_id: The layer stack ID. :type type: string - :param type: - The layer type. A stack cannot have more than one built-in layer of the - same type. It can have any number of custom layers. This parameter - must be set to one of the following: - - - + custom: A custom layer - + db-master: A MySQL layer - + java-app: A Java App Server layer - + rails-app: A Rails App Server layer - + lb: An HAProxy layer - + memcached: A Memcached layer - + monitoring-master: A Ganglia layer - + nodejs-app: A Node.js App Server layer - + php-app: A PHP App Server layer - + web: A Static Web Server layer + :param type: The layer type. A stack cannot have more than one built-in + layer of the same type. It can have any number of custom layers. :type name: string :param name: The layer name, which is used by the console. @@ -789,18 +840,28 @@ layer custom recipes. :type install_updates_on_boot: boolean - :param install_updates_on_boot: Whether to install operating system and - package updates when the instance boots. The default value is - `True`. To control when updates are installed, set this value to - `False`. You must then update your instances manually by using - CreateDeployment to run the `update_dependencies` stack command or - manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the - instances. + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances. + + + We strongly recommend using the default value of `True`, to ensure that + your instances have the latest security updates. :type use_ebs_optimized_instances: boolean :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized instances. + :type lifecycle_event_configuration: dict + :param lifecycle_event_configuration: A LifeCycleEventConfiguration + object that you can use to configure the Shutdown event to specify + an execution timeout and enable or disable Elastic Load Balancer + connection draining. 
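The lifecycle configuration is passed as a plain dict; the Shutdown/ExecutionTimeout/DelayUntilElbConnectionsDrained key names below follow the OpsWorks API's LifecycleEventConfiguration shape and should be read as an assumption:

    ops.create_layer(
        stack_id='stack-id-example',
        type='custom',
        name='My Custom Layer',
        shortname='custom1',
        lifecycle_event_configuration={
            'Shutdown': {
                'ExecutionTimeout': 300,  # seconds before forced shutdown
                'DelayUntilElbConnectionsDrained': True,
            },
        })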
+ """ params = { 'StackId': stack_id, @@ -830,6 +891,8 @@ params['InstallUpdatesOnBoot'] = install_updates_on_boot if use_ebs_optimized_instances is not None: params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration return self.make_request(action='CreateLayer', body=json.dumps(params)) @@ -860,8 +923,8 @@ :type vpc_id: string :param vpc_id: The ID of the VPC that the stack is to be launched into. - It must be in the specified region. All instances will be launched - into this VPC, and you cannot change the ID later. + It must be in the specified region. All instances are launched into + this VPC, and you cannot change the ID later. + If your account supports EC2 Classic, the default value is no VPC. + If your account does not support EC2 Classic, the default value is @@ -905,9 +968,16 @@ information about IAM ARNs, see `Using Identifiers`_. :type default_os: string - :param default_os: The stack's default operating system, which must be - set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is - `Amazon Linux`. + :param default_os: The stack's operating system, which must be set to + one of the following. + + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + + Custom AMIs: `Custom`. You specify the custom AMI you want to use + when you create instances. + + + The default option is the current Amazon Linux version. :type hostname_theme: string :param hostname_theme: The stack's host name theme, with spaces are @@ -940,12 +1010,13 @@ information, see the `VpcId` parameter description. :type default_subnet_id: string - :param default_subnet_id: The stack's default subnet ID. All instances - will be launched into this subnet unless you specify otherwise when - you create the instance. If you also specify a value for - `DefaultAvailabilityZone`, the subnet must be in that zone. For - information on default values and when this parameter is required, - see the `VpcId` parameter description. + :param default_subnet_id: The stack's default VPC subnet ID. This + parameter is required if you specify a value for the `VpcId` + parameter. All instances are launched into this subnet unless you + specify otherwise when you create the instance. If you also specify + a value for `DefaultAvailabilityZone`, the subnet must be in that + zone. For information on default values and when this parameter is + required, see the `VpcId` parameter description. :type custom_json: string :param custom_json: A string that contains user-defined, custom JSON. @@ -1111,9 +1182,11 @@ def delete_instance(self, instance_id, delete_elastic_ip=None, delete_volumes=None): """ - Deletes a specified instance. You must stop an instance before - you can delete it. For more information, see `Deleting - Instances`_. + Deletes a specified instance, which terminates the associated + Amazon EC2 instance. You must stop an instance before you can + delete it. + + For more information, see `Deleting Instances`_. **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached @@ -1144,8 +1217,8 @@ def delete_layer(self, layer_id): """ Deletes a specified layer. You must first stop and then delete - all associated instances. For more information, see `How to - Delete a Layer`_. + all associated instances or unassign registered instances. 
For + more information, see `How to Delete a Layer`_. **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached @@ -1164,8 +1237,8 @@ def delete_stack(self, stack_id): """ Deletes a specified stack. You must first delete all - instances, layers, and apps. For more information, see `Shut - Down a Stack`_. + instances, layers, and apps or deregister registered + instances. For more information, see `Shut Down a Stack`_. **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached @@ -1218,10 +1291,37 @@ return self.make_request(action='DeregisterElasticIp', body=json.dumps(params)) + def deregister_instance(self, instance_id): + """ + Deregister a registered Amazon EC2 or on-premises instance. + This action removes the instance from the stack and returns it + to your control. This action cannot be used with instances + that were created with AWS OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='DeregisterInstance', + body=json.dumps(params)) + def deregister_rds_db_instance(self, rds_db_instance_arn): """ Deregisters an Amazon RDS instance. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type rds_db_instance_arn: string :param rds_db_instance_arn: The Amazon RDS instance's ARN. @@ -1254,8 +1354,10 @@ """ Requests a description of a specified set of apps. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1286,8 +1388,10 @@ """ Describes the results of specified commands. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1326,8 +1430,10 @@ """ Requests a description of a specified set of deployments. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1365,8 +1471,10 @@ """ Describes `Elastic IP addresses`_. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1404,8 +1512,10 @@ """ Describes a stack's Elastic Load Balancing instances. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1434,8 +1544,10 @@ """ Requests a description of a set of instances. + You must specify at least one of the parameters. 
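The same at-least-one-parameter rule applies across these describe calls; for instance (the `Instances` response key is assumed from the API shape):

    result = ops.describe_instances(stack_id='stack-id-example')
    for inst in result.get('Instances', []):
        print(inst.get('InstanceId'), inst.get('Status'))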
+ **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1474,8 +1586,10 @@ Requests a description of one or more layers in a specified stack. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1504,8 +1618,10 @@ Describes load-based auto scaling configurations for specified layers. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1539,8 +1655,6 @@ """ Describes the permissions for a specified stack. - You must specify at least one of the parameters. - **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more @@ -1563,12 +1677,15 @@ return self.make_request(action='DescribePermissions', body=json.dumps(params)) - def describe_raid_arrays(self, instance_id=None, raid_array_ids=None): + def describe_raid_arrays(self, instance_id=None, stack_id=None, + raid_array_ids=None): """ Describe an instance's RAID arrays. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1580,6 +1697,9 @@ `DescribeRaidArrays` returns descriptions of the RAID arrays associated with the specified instance. + :type stack_id: string + :param stack_id: The stack ID. + :type raid_array_ids: list :param raid_array_ids: An array of RAID array IDs. If you use this parameter, `DescribeRaidArrays` returns descriptions of the @@ -1590,6 +1710,8 @@ params = {} if instance_id is not None: params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id if raid_array_ids is not None: params['RaidArrayIds'] = raid_array_ids return self.make_request(action='DescribeRaidArrays', @@ -1599,6 +1721,12 @@ """ Describes Amazon RDS instances. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: The stack ID that the instances are registered with. The operation returns descriptions of all registered Amazon RDS @@ -1653,6 +1781,24 @@ return self.make_request(action='DescribeServiceErrors', body=json.dumps(params)) + def describe_stack_provisioning_parameters(self, stack_id): + """ + Requests a description of a stack's provisioning parameters. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the stack + or an attached policy that explicitly grants permissions. For + more information on user permissions, see `Managing User + Permissions`_. 
+ + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DescribeStackProvisioningParameters', + body=json.dumps(params)) + def describe_stack_summary(self, stack_id): """ Describes the number of layers and apps in a specified stack, @@ -1700,6 +1846,10 @@ Describes time-based auto scaling configurations for specified instances. + + You must specify at least one of the parameters. + + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1739,8 +1889,10 @@ """ Describes an instance's Amazon EBS volumes. + You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants @@ -1890,11 +2042,81 @@ return self.make_request(action='RegisterElasticIp', body=json.dumps(params)) + def register_instance(self, stack_id, hostname=None, public_ip=None, + private_ip=None, rsa_public_key=None, + rsa_public_key_fingerprint=None, + instance_identity=None): + """ + Registers instances with a specified stack that were created + outside of AWS OpsWorks. + + We do not recommend using this action to register instances. + The complete registration operation has two primary steps, + installing the AWS OpsWorks agent on the instance and + registering the instance with the stack. `RegisterInstance` + handles only the second step. You should instead use the AWS + CLI `register` command, which performs the entire registration + operation. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The ID of the stack that the instance is to be + registered with. + + :type hostname: string + :param hostname: The instance's hostname. + + :type public_ip: string + :param public_ip: The instance's public IP address. + + :type private_ip: string + :param private_ip: The instance's private IP address. + + :type rsa_public_key: string + :param rsa_public_key: The instance's public RSA key. This key is used + to encrypt communication between the instance and the service. + + :type rsa_public_key_fingerprint: string + :param rsa_public_key_fingerprint: The instance's public RSA key + fingerprint. + + :type instance_identity: dict + :param instance_identity: An InstanceIdentity object that contains the + instance's identity. + + """ + params = {'StackId': stack_id, } + if hostname is not None: + params['Hostname'] = hostname + if public_ip is not None: + params['PublicIp'] = public_ip + if private_ip is not None: + params['PrivateIp'] = private_ip + if rsa_public_key is not None: + params['RsaPublicKey'] = rsa_public_key + if rsa_public_key_fingerprint is not None: + params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint + if instance_identity is not None: + params['InstanceIdentity'] = instance_identity + return self.make_request(action='RegisterInstance', + body=json.dumps(params)) + def register_rds_db_instance(self, stack_id, rds_db_instance_arn, db_user, db_password): """ Registers an Amazon RDS instance with a stack. 
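A sketch of the call, with hypothetical ARN and credentials (signature as added above):

    ops.register_rds_db_instance(
        stack_id='stack-id-example',
        rds_db_instance_arn='arn:aws:rds:us-east-1:123456789012:db:mydb',
        db_user='opsworks',
        db_password='example-password')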
+ **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1951,6 +2173,14 @@ specified layer. For more information, see `Managing Load with Time-based and Load-based Instances`_. + + To use load-based auto scaling, you must create a set of load- + based auto scaling instances. Load-based auto scaling operates + only on the instances from that set, so you must ensure that + you have created enough instances to handle the maximum + anticipated load. + + **Required Permissions**: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more @@ -2141,6 +2371,28 @@ return self.make_request(action='StopStack', body=json.dumps(params)) + def unassign_instance(self, instance_id): + """ + Unassigns a registered instance from all of its layers. The + instance remains in the stack as an unassigned instance and + can be assigned to another layer, as needed. You cannot use + this action with instances that were created with AWS + OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='UnassignInstance', + body=json.dumps(params)) + def unassign_volume(self, volume_id): """ Unassigns an assigned Amazon EBS volume. The volume remains @@ -2164,7 +2416,7 @@ def update_app(self, app_id, name=None, description=None, data_sources=None, type=None, app_source=None, domains=None, enable_ssl=None, ssl_configuration=None, - attributes=None): + attributes=None, environment=None): """ Updates a specified app. @@ -2207,6 +2459,17 @@ :param attributes: One or more user-defined key/value pairs to be added to the stack attributes. + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instances. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + """ params = {'AppId': app_id, } if name is not None: @@ -2227,6 +2490,8 @@ params['SslConfiguration'] = ssl_configuration if attributes is not None: params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment return self.make_request(action='UpdateApp', body=json.dumps(params)) @@ -2282,41 +2547,37 @@ in the API Name column of the Available Instance Types table. :type auto_scaling_type: string - :param auto_scaling_type: - The instance's auto scaling type, which has three possible values: - - - + **AlwaysRunning**: A 24/7 instance, which is not affected by auto - scaling. - + **TimeBasedAutoScaling**: A time-based auto scaling instance, which - is started and stopped based on a specified schedule. 
- + **LoadBasedAutoScaling**: A load-based auto scaling instance, which - is started and stopped based on load metrics. + :param auto_scaling_type: For load-based or time-based instances, the + type. :type hostname: string :param hostname: The instance host name. :type os: string - :param os: The instance operating system, which must be set to one of + :param os: The instance's operating system, which must be set to one of the following. - + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS` + + Standard operating systems: An Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + Custom AMIs: `Custom` - The default option is `Amazon Linux`. If you set this parameter to - `Custom`, you must use the CreateInstance action's AmiId parameter - to specify the custom AMI that you want to use. For more - information on the standard operating systems, see `Operating - Systems`_For more information on how to use custom AMIs with - OpsWorks, see `Using Custom AMIs`_. + The default option is the current Amazon Linux version, such as `Amazon + Linux 2014.09`. If you set this parameter to `Custom`, you must use + the CreateInstance action's AmiId parameter to specify the custom + AMI that you want to use. For more information on the standard + operating systems, see `Operating Systems`_For more information on + how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_. :type ami_id: string - :param ami_id: A custom AMI ID to be used to create the instance. The - AMI should be based on one of the standard AWS OpsWorks APIs: - Amazon Linux or Ubuntu 12.04 LTS. For more information, see + :param ami_id: + A custom AMI ID to be used to create the instance. The AMI should be + based on one of the standard AWS OpsWorks AMIs: Amazon Linux, + Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see `Instances`_ + If you specify a custom AMI, you must set `Os` to `Custom`. + :type ssh_key_name: string :param ssh_key_name: The instance SSH key name. @@ -2327,13 +2588,17 @@ see `Instance Families and Types`_. :type install_updates_on_boot: boolean - :param install_updates_on_boot: Whether to install operating system and - package updates when the instance boots. The default value is - `True`. To control when updates are installed, set this value to - `False`. You must then update your instances manually by using - CreateDeployment to run the `update_dependencies` stack command or - manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the - instances. + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances. + + + We strongly recommend using the default value of `True`, to ensure that + your instances have the latest security updates. :type ebs_optimized: boolean :param ebs_optimized: Whether this is an Amazon EBS-optimized instance. @@ -2370,7 +2635,8 @@ auto_assign_elastic_ips=None, auto_assign_public_ips=None, custom_recipes=None, install_updates_on_boot=None, - use_ebs_optimized_instances=None): + use_ebs_optimized_instances=None, + lifecycle_event_configuration=None): """ Updates a specified layer. @@ -2433,18 +2699,25 @@ layer's custom recipes. 
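The Environment parameter introduced above for create_app and update_app is a list of maps; the Key/Value/Secure field names follow the OpsWorks EnvironmentVariable type and are assumptions here:

    ops.update_app(
        app_id='app-id-example',
        environment=[
            {'Key': 'DJANGO_SETTINGS_MODULE', 'Value': 'settings.prod'},
            {'Key': 'DB_PASSWORD', 'Value': 'example-password', 'Secure': True},
        ])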
:type install_updates_on_boot: boolean - :param install_updates_on_boot: Whether to install operating system and - package updates when the instance boots. The default value is - `True`. To control when updates are installed, set this value to - `False`. You must then update your instances manually by using - CreateDeployment to run the `update_dependencies` stack command or - manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the - instances. + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances. + + + We strongly recommend using the default value of `True`, to ensure that + your instances have the latest security updates. :type use_ebs_optimized_instances: boolean :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized instances. + :type lifecycle_event_configuration: dict + :param lifecycle_event_configuration: + """ params = {'LayerId': layer_id, } if name is not None: @@ -2473,6 +2746,8 @@ params['InstallUpdatesOnBoot'] = install_updates_on_boot if use_ebs_optimized_instances is not None: params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration return self.make_request(action='UpdateLayer', body=json.dumps(params)) @@ -2500,6 +2775,12 @@ """ Updates an Amazon RDS instance. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type rds_db_instance_arn: string :param rds_db_instance_arn: The Amazon RDS instance's ARN. @@ -2548,11 +2829,18 @@ to the stack attributes. :type service_role_arn: string - :param service_role_arn: The stack AWS Identity and Access Management - (IAM) role, which allows AWS OpsWorks to work with AWS resources on - your behalf. You must set this parameter to the Amazon Resource - Name (ARN) for an existing IAM role. For more information about IAM - ARNs, see `Using Identifiers`_. + :param service_role_arn: + The stack AWS Identity and Access Management (IAM) role, which allows + AWS OpsWorks to work with AWS resources on your behalf. You must + set this parameter to the Amazon Resource Name (ARN) for an + existing IAM role. For more information about IAM ARNs, see `Using + Identifiers`_. + + + You must set this parameter to a valid service role ARN or the action + will fail; there is no default value. You can specify the stack's + current service role ARN, if you prefer, but you must do so + explicitly. :type default_instance_profile_arn: string :param default_instance_profile_arn: The ARN of an IAM profile that is @@ -2560,9 +2848,16 @@ information about IAM ARNs, see `Using Identifiers`_. :type default_os: string - :param default_os: The stack's default operating system, which must be - set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is - `Amazon Linux`. + :param default_os: The stack's operating system, which must be set to + one of the following. 
+
+ + Standard operating systems: an Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+ when you create instances.
+
+
+ The default option is the current Amazon Linux version.

 :type hostname_theme: string
 :param hostname_theme: The stack's new host name theme, with spaces are
@@ -2595,11 +2890,13 @@
 information, see CreateStack.

 :type default_subnet_id: string
- :param default_subnet_id: The stack's default subnet ID. All instances
- will be launched into this subnet unless you specify otherwise when
- you create the instance. If you also specify a value for
- `DefaultAvailabilityZone`, the subnet must be in that zone. For
- more information, see CreateStack.
+ :param default_subnet_id: The stack's default VPC subnet ID. This
+ parameter is required if you specify a value for the `VpcId`
+ parameter. All instances are launched into this subnet unless you
+ specify otherwise when you create the instance. If you also specify
+ a value for `DefaultAvailabilityZone`, the subnet must be in that
+ zone. For information on default values and when this parameter is
+ required, see the `VpcId` parameter description.

 :type custom_json: string
 :param custom_json: A string that contains user-defined, custom JSON.

@@ -2794,3 +3091,4 @@
 exception_class = self._faults.get(fault_name, self.ResponseError)
 raise exception_class(response.status, response.reason,
 body=json_body)
+
diff -Nru python-boto-2.34.0/boto/provider.py python-boto-2.38.0/boto/provider.py
--- python-boto-2.34.0/boto/provider.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/provider.py 2015-04-09 18:57:51.000000000 +0000
@@ -67,6 +67,7 @@
 STORAGE_DATA_ERROR = 'StorageDataError'
 STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
 STORAGE_RESPONSE_ERROR = 'StorageResponseError'
+NO_CREDENTIALS_PROVIDED = object()


 class ProfileNotFoundError(ValueError):
diff -Nru python-boto-2.34.0/boto/pyami/config.py python-boto-2.38.0/boto/pyami/config.py
--- python-boto-2.34.0/boto/pyami/config.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/pyami/config.py 2015-04-09 18:57:51.000000000 +0000
@@ -42,10 +42,10 @@
 BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]

 # If there's a BOTO_PATH variable set, we use anything there
-# as the current configuration locations, split with colons
+# as the current configuration locations, split with os.pathsep.
 elif 'BOTO_PATH' in os.environ:
 BotoConfigLocations = []
- for path in os.environ['BOTO_PATH'].split(":"):
+ for path in os.environ['BOTO_PATH'].split(os.pathsep):
 BotoConfigLocations.append(expanduser(path))

diff -Nru python-boto-2.34.0/boto/route53/connection.py python-boto-2.38.0/boto/route53/connection.py
--- python-boto-2.34.0/boto/route53/connection.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/route53/connection.py 2015-04-09 18:57:51.000000000 +0000
@@ -47,6 +47,19 @@
 """

+HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <VPC>
+    <VPCId>%(vpc_id)s</VPCId>
+    <VPCRegion>%(vpc_region)s</VPCRegion>
+  </VPC>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
 # boto.set_stream_logger('dns')

@@ -162,7 +175,8 @@
 if zone['Name'] == hosted_zone_name:
 return self.get_hosted_zone(zone['Id'].split('/')[-1])

- def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
+ def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
+ private_zone=False, vpc_id=None, vpc_region=None):
 """
 Create a new Hosted Zone.
 Returns a Python data structure with information about the newly
 created Hosted Zone.

@@ -189,14 +203,34 @@
 :param comment: Any comments you want to include about the hosted
 zone.

+ :type private_zone: bool
+ :param private_zone: Set True if creating a private hosted zone.
+
+ :type vpc_id: str
+ :param vpc_id: When creating a private hosted zone, the VPC Id to
+ associate to is required.
+
+ :type vpc_region: str
+ :param vpc_region: When creating a private hosted zone, the region of
+ the associated VPC is required.
+
 """
 if caller_ref is None:
 caller_ref = str(uuid.uuid4())
- params = {'name': domain_name,
- 'caller_ref': caller_ref,
- 'comment': comment,
- 'xmlns': self.XMLNameSpace}
- xml_body = HZXML % params
+ if private_zone:
+ params = {'name': domain_name,
+ 'caller_ref': caller_ref,
+ 'comment': comment,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'xmlns': self.XMLNameSpace}
+ xml_body = HZPXML % params
+ else:
+ params = {'name': domain_name,
+ 'caller_ref': caller_ref,
+ 'comment': comment,
+ 'xmlns': self.XMLNameSpace}
+ xml_body = HZXML % params
 uri = '/%s/hostedzone' % self.Version
 response = self.make_request('POST', uri,
 {'Content-Type': 'text/xml'},
 xml_body)
@@ -301,7 +335,25 @@
 raise exception.DNSServerError(response.status,
 response.reason,
 body)
- e = boto.jsonresponse.Element(list_marker='HealthChecks', item_marker=('HealthCheck',))
+ e = boto.jsonresponse.Element(list_marker='HealthChecks',
+ item_marker=('HealthCheck',))
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+ def get_checker_ip_ranges(self):
+ """
+ Return a list of Route53 healthcheck IP ranges
+ """
+ uri = '/%s/checkeripranges' % self.Version
+ response = self.make_request('GET', uri)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status >= 300:
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',))
 h = boto.jsonresponse.XmlHandler(e, None)
 h.parse(body)
 return e
@@ -451,7 +503,8 @@
 h.parse(body)
 return e

- def create_zone(self, name):
+ def create_zone(self, name, private_zone=False,
+ vpc_id=None, vpc_region=None):
 """
 Create a new Hosted Zone. Returns a Zone object for the newly
 created Hosted Zone.

@@ -465,8 +518,20 @@
 It is also the name you will delegate from your registrar to
 the Amazon Route 53 delegation servers returned in response
 to this request.
+
+ :type private_zone: bool
+ :param private_zone: Set True if creating a private hosted zone.
+
+ :type vpc_id: str
+ :param vpc_id: When creating a private hosted zone, the VPC Id to
+ associate to is required.
+
+ :type vpc_region: str
+ :param vpc_region: When creating a private hosted zone, the region of
+ the associated VPC is required.
""" - zone = self.create_hosted_zone(name) + zone = self.create_hosted_zone(name, private_zone=private_zone, + vpc_id=vpc_id, vpc_region=vpc_region) return Zone(self, zone['CreateHostedZoneResponse']['HostedZone']) def get_zone(self, name): diff -Nru python-boto-2.34.0/boto/route53/hostedzone.py python-boto-2.38.0/boto/route53/hostedzone.py --- python-boto-2.34.0/boto/route53/hostedzone.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/route53/hostedzone.py 2015-04-09 18:57:51.000000000 +0000 @@ -26,20 +26,15 @@ class HostedZone(object): def __init__(self, id=None, name=None, owner=None, version=None, - caller_reference=None, config=None): + caller_reference=None): self.id = id self.name = name self.owner = owner self.version = version self.caller_reference = caller_reference - self.config = config def startElement(self, name, attrs, connection): - if name == 'Config': - self.config = Config() - return self.config - else: - return None + return None def endElement(self, name, value, connection): if name == 'Id': diff -Nru python-boto-2.34.0/boto/route53/record.py python-boto-2.38.0/boto/route53/record.py --- python-boto-2.34.0/boto/route53/record.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/route53/record.py 2015-04-09 18:57:51.000000000 +0000 @@ -123,7 +123,7 @@ a value that determines which region this should be associated with for the latency-based routing - :type alias_evaluate_target_health: Boolean + :type alias_evaluate_target_health: bool :param alias_evaluate_target_health: *Required for alias resource record sets* Indicates whether this Resource Record Set should respect the health status of any health checks associated with the ALIAS target diff -Nru python-boto-2.34.0/boto/s3/acl.py python-boto-2.38.0/boto/s3/acl.py --- python-boto-2.34.0/boto/s3/acl.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/s3/acl.py 2015-04-09 18:57:51.000000000 +0000 @@ -53,7 +53,7 @@ def startElement(self, name, attrs, connection): if name == 'AccessControlPolicy': self.namespace = attrs.get('xmlns', None) - return None + return None if name == 'Owner': self.owner = User(self) return self.owner @@ -75,12 +75,13 @@ if self.namespace is not None: s = ''.format(self.namespace) else: - s = '' + s = '' s += self.owner.to_xml() s += self.acl.to_xml() s += '' return s + class ACL(object): def __init__(self, policy=None): @@ -119,6 +120,7 @@ s += '' return s + class Grant(object): NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' @@ -167,5 +169,3 @@ s += '%s' % self.permission s += '' return s - - diff -Nru python-boto-2.34.0/boto/s3/__init__.py python-boto-2.38.0/boto/s3/__init__.py --- python-boto-2.34.0/boto/s3/__init__.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/s3/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -1,5 +1,6 @@ # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2014, Steven Richards # All rights reserved. 
# # Permission is hereby granted, free of charge, to any person obtaining a @@ -59,6 +60,15 @@ def connect_to_region(region_name, **kw_params): for region in regions(): + if 'host' in kw_params.keys(): + # Make sure the host specified is not nothing + if kw_params['host'] not in ['', None]: + region.endpoint = kw_params['host'] + del kw_params['host'] + return region.connect(**kw_params) + # If it is nothing then remove it from kw_params and proceed with default + else: + del kw_params['host'] if region.name == region_name: return region.connect(**kw_params) return None diff -Nru python-boto-2.34.0/boto/s3/key.py python-boto-2.38.0/boto/s3/key.py --- python-boto-2.34.0/boto/s3/key.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/s3/key.py 2015-04-09 18:57:51.000000000 +0000 @@ -426,12 +426,13 @@ :param validate_dst_bucket: If True, will validate the dst_bucket by using an extra list request. """ + bucket_name = dst_bucket or self.bucket.name if new_storage_class == 'STANDARD': - return self.copy(self.bucket.name, self.name, + return self.copy(bucket_name, self.name, reduced_redundancy=False, preserve_acl=True, validate_dst_bucket=validate_dst_bucket) elif new_storage_class == 'REDUCED_REDUNDANCY': - return self.copy(self.bucket.name, self.name, + return self.copy(bucket_name, self.name, reduced_redundancy=True, preserve_acl=True, validate_dst_bucket=validate_dst_bucket) else: @@ -495,7 +496,8 @@ self.name, metadata, storage_class=storage_class, preserve_acl=preserve_acl, - encrypt_key=encrypt_key) + encrypt_key=encrypt_key, + src_version_id=self.version_id) def startElement(self, name, attrs, connection): if name == 'Owner': @@ -1673,7 +1675,7 @@ the second representing the size of the to be transmitted object. - :type cb: int + :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the diff -Nru python-boto-2.34.0/boto/ses/connection.py python-boto-2.38.0/boto/ses/connection.py --- python-boto-2.34.0/boto/ses/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/ses/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -104,8 +104,8 @@ body = response.read().decode('utf-8') if response.status == 200: list_markers = ('VerifiedEmailAddresses', 'Identities', - 'DkimTokens', 'VerificationAttributes', - 'SendDataPoints') + 'DkimTokens', 'DkimAttributes', + 'VerificationAttributes', 'SendDataPoints') item_markers = ('member', 'item', 'entry') e = boto.jsonresponse.Element(list_marker=list_markers, diff -Nru python-boto-2.34.0/boto/sqs/connection.py python-boto-2.38.0/boto/sqs/connection.py --- python-boto-2.34.0/boto/sqs/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/sqs/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -111,6 +111,18 @@ """ return self.get_status('DeleteQueue', None, queue.id) + def purge_queue(self, queue): + """ + Purge all messages in an SQS Queue. 
+ + :type queue: A Queue object + :param queue: The SQS queue to be purged + + :rtype: bool + :return: True if the command succeeded, False otherwise + """ + return self.get_status('PurgeQueue', None, queue.id) + def get_queue_attributes(self, queue, attribute='All'): """ Gets one or all attributes of a Queue diff -Nru python-boto-2.34.0/boto/sqs/message.py python-boto-2.38.0/boto/sqs/message.py --- python-boto-2.34.0/boto/sqs/message.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/sqs/message.py 2015-04-09 18:57:51.000000000 +0000 @@ -68,6 +68,7 @@ import boto from boto.compat import StringIO +from boto.compat import six from boto.sqs.attributes import Attributes from boto.sqs.messageattributes import MessageAttributes from boto.exception import SQSDecodeError @@ -163,7 +164,9 @@ """ def encode(self, value): - return base64.b64encode(value.encode('utf-8')).decode('utf-8') + if not isinstance(value, six.binary_type): + value = value.encode('utf-8') + return base64.b64encode(value).decode('utf-8') def decode(self, value): try: diff -Nru python-boto-2.34.0/boto/sqs/queue.py python-boto-2.38.0/boto/sqs/queue.py --- python-boto-2.34.0/boto/sqs/queue.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/sqs/queue.py 2015-04-09 18:57:51.000000000 +0000 @@ -340,16 +340,15 @@ """ return self.connection.delete_queue(self) + def purge(self): + """ + Purge all messages in the queue. + """ + return self.connection.purge_queue(self) + def clear(self, page_size=10, vtimeout=10): - """Utility function to remove all messages from a queue""" - n = 0 - l = self.get_messages(page_size, vtimeout) - while l: - for m in l: - self.delete_message(m) - n += 1 - l = self.get_messages(page_size, vtimeout) - return n + """Deprecated utility function to remove all messages from a queue""" + return self.purge() def count(self, page_size=10, vtimeout=10): """ diff -Nru python-boto-2.34.0/boto/sts/connection.py python-boto-2.38.0/boto/sts/connection.py --- python-boto-2.34.0/boto/sts/connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/boto/sts/connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -22,6 +22,7 @@ # IN THE SOFTWARE. from boto.connection import AWSQueryConnection +from boto.provider import Provider, NO_CREDENTIALS_PROVIDED from boto.regioninfo import RegionInfo from boto.sts.credentials import Credentials, FederationToken, AssumedRole from boto.sts.credentials import DecodeAuthorizationMessage @@ -71,6 +72,13 @@ https_connection_factory=None, region=None, path='/', converter=None, validate_certs=True, anon=False, security_token=None, profile_name=None): + """ + :type anon: boolean + :param anon: If this parameter is True, the ``STSConnection`` object + will make anonymous requests, and it will not use AWS + Credentials or even search for AWS Credentials to make these + requests. + """ if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, @@ -78,6 +86,15 @@ self.region = region self.anon = anon self._mutex = threading.Semaphore() + provider = 'aws' + # If an anonymous request is sent, do not try to look for credentials. + # So we pass in dummy values for the access key id, secret access + # key, and session token. It does not matter that they are + # not actual values because the request is anonymous. 
+ if self.anon:
+ provider = Provider('aws', NO_CREDENTIALS_PROVIDED,
+ NO_CREDENTIALS_PROVIDED,
+ NO_CREDENTIALS_PROVIDED)
 super(STSConnection, self).__init__(aws_access_key_id,
 aws_secret_access_key,
 is_secure, port, proxy, proxy_port,
@@ -86,11 +103,12 @@
 https_connection_factory, path,
 validate_certs=validate_certs,
 security_token=security_token,
- profile_name=profile_name)
+ profile_name=profile_name,
+ provider=provider)

 def _required_auth_capability(self):
 if self.anon:
- return ['pure-query']
+ return ['sts-anon']
 else:
 return ['hmac-v4']

diff -Nru python-boto-2.34.0/boto/swf/layer2.py python-boto-2.38.0/boto/swf/layer2.py
--- python-boto-2.34.0/boto/swf/layer2.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/swf/layer2.py 2015-04-09 18:57:51.000000000 +0000
@@ -79,6 +79,7 @@
 'aws_access_key_id': self.aws_access_key_id,
 'aws_secret_access_key': self.aws_secret_access_key,
 'domain': self.name,
+ 'region': self.region,
 })
 act_objects.append(ActivityType(**act_args))
 return act_objects
@@ -96,6 +97,7 @@
 'aws_access_key_id': self.aws_access_key_id,
 'aws_secret_access_key': self.aws_secret_access_key,
 'domain': self.name,
+ 'region': self.region,
 })
 wf_objects.append(WorkflowType(**wf_args))

@@ -128,6 +130,7 @@
 'aws_access_key_id': self.aws_access_key_id,
 'aws_secret_access_key': self.aws_secret_access_key,
 'domain': self.name,
+ 'region': self.region,
 })
 exe_objects.append(WorkflowExecution(**exe_args))

diff -Nru python-boto-2.34.0/boto/vpc/__init__.py python-boto-2.38.0/boto/vpc/__init__.py
--- python-boto-2.34.0/boto/vpc/__init__.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/vpc/__init__.py 2015-04-09 18:57:51.000000000 +0000
@@ -1685,3 +1685,144 @@
 return self.get_object('AcceptVpcPeeringConnection', params,
 VpcPeeringConnection)
+ def get_all_classic_link_vpcs(self, vpc_ids=None, filters=None,
+ dry_run=False):
+ """
+ Describes the ClassicLink status of one or more VPCs.
+
+ :type vpc_ids: list
+ :param vpc_ids: A list of strings with the desired VPC IDs
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :type filters: list of tuples or dict
+ :param filters: A list of tuples or dict containing filters. Each tuple
+ or dict item consists of a filter key and a filter value.
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.vpc.VPC`
+ """
+ params = {}
+ if vpc_ids:
+ self.build_list_params(params, vpc_ids, 'VpcId')
+ if filters:
+ self.build_filter_params(params, filters)
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_list('DescribeVpcClassicLink', params, [('item', VPC)],
+ verb='POST')
+
+ def attach_classic_link_vpc(self, vpc_id, instance_id, groups,
+ dry_run=False):
+ """
+ Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+ or more of the VPC's security groups. You cannot link an EC2-Classic
+ instance to more than one VPC at a time. You can only link an instance
+ that's in the running state. An instance is automatically unlinked from
+ a VPC when it's stopped. You can link it to the VPC again when you
+ restart it.
+
+ After you've linked an instance, you cannot change the VPC security
+ groups that are associated with it. To change the security groups, you
+ must first unlink the instance, and then link it again.
+
+ Linking your instance to a VPC is sometimes referred to as attaching
+ your instance.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of a ClassicLink-enabled VPC.
+
+ :type instance_id: str
+ :param instance_id: The ID of the EC2-Classic instance to link to the
+ ClassicLink-enabled VPC.
+
+ :type groups: list
+ :param groups: The ID of one or more of the VPC's security groups.
+ You cannot specify security groups from a different VPC. The
+ members of the list can be
+ :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+ strings of the IDs of the security groups.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ l = []
+ for group in groups:
+ if hasattr(group, 'id'):
+ l.append(group.id)
+ else:
+ l.append(group)
+ self.build_list_params(params, l, 'SecurityGroupId')
+ return self.get_status('AttachClassicLinkVpc', params)
+
+ def detach_classic_link_vpc(self, vpc_id, instance_id, dry_run=False):
+ """
+ Unlinks a linked EC2-Classic instance from a VPC. After the instance
+ has been unlinked, the VPC security groups are no longer associated
+ with it. An instance is automatically unlinked from a VPC when
+ it's stopped.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC to which the instance is linked.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance to unlink from the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('DetachClassicLinkVpc', params)
+
+ def disable_vpc_classic_link(self, vpc_id, dry_run=False):
+ """
+ Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
+ VPC that has EC2-Classic instances linked to it.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('DisableVpcClassicLink', params)
+
+ def enable_vpc_classic_link(self, vpc_id, dry_run=False):
+ """
+ Enables a VPC for ClassicLink. You can then link EC2-Classic instances
+ to your ClassicLink-enabled VPC to allow communication over private IP
+ addresses. You cannot enable your VPC for ClassicLink if any of your
+ VPC's route tables have existing routes for address ranges within the
+ 10.0.0.0/8 IP address range, excluding local routes for VPCs in the
+ 10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('EnableVpcClassicLink', params)
diff -Nru python-boto-2.34.0/boto/vpc/vpc.py python-boto-2.38.0/boto/vpc/vpc.py
--- python-boto-2.34.0/boto/vpc/vpc.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/boto/vpc/vpc.py 2015-04-09 18:57:51.000000000 +0000
@@ -38,6 +38,7 @@
 :ivar cidr_block: The CIDR block for the VPC.
 :ivar is_default: Indicates whether the VPC is the default VPC.
 :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
+ :ivar classic_link_enabled: Indicates whether ClassicLink is enabled.
""" super(VPC, self).__init__(connection) self.id = None @@ -46,6 +47,7 @@ self.cidr_block = None self.is_default = None self.instance_tenancy = None + self.classic_link_enabled = None def __repr__(self): return 'VPC:%s' % self.id @@ -63,6 +65,8 @@ self.is_default = True if value == 'true' else False elif name == 'instanceTenancy': self.instance_tenancy = value + elif name == 'classicLinkEnabled': + self.classic_link_enabled = value else: setattr(self, name, value) @@ -72,8 +76,9 @@ def _update(self, updated): self.__dict__.update(updated.__dict__) - def update(self, validate=False, dry_run=False): - vpc_list = self.connection.get_all_vpcs( + def _get_status_then_update_vpc(self, get_status_method, validate=False, + dry_run=False): + vpc_list = get_status_method( [self.id], dry_run=dry_run ) @@ -82,4 +87,118 @@ self._update(updated_vpc) elif validate: raise ValueError('%s is not a valid VPC ID' % (self.id,)) + + def update(self, validate=False, dry_run=False): + self._get_status_then_update_vpc( + self.connection.get_all_vpcs, + validate=validate, + dry_run=dry_run + ) return self.state + + def update_classic_link_enabled(self, validate=False, dry_run=False): + """ + Updates instance's classic_link_enabled attribute + + :rtype: bool + :return: self.classic_link_enabled after update has occurred. + """ + self._get_status_then_update_vpc( + self.connection.get_all_classic_link_vpcs, + validate=validate, + dry_run=dry_run + ) + return self.classic_link_enabled + + def disable_classic_link(self, dry_run=False): + """ + Disables ClassicLink for a VPC. You cannot disable ClassicLink for a + VPC that has EC2-Classic instances linked to it. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self.connection.disable_vpc_classic_link(self.id, + dry_run=dry_run) + + def enable_classic_link(self, dry_run=False): + """ + Enables a VPC for ClassicLink. You can then link EC2-Classic instances + to your ClassicLink-enabled VPC to allow communication over private IP + addresses. You cannot enable your VPC for ClassicLink if any of your + VPC's route tables have existing routes for address ranges within the + 10.0.0.0/8 IP address range, excluding local routes for VPCs in the + 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self.connection.enable_vpc_classic_link(self.id, + dry_run=dry_run) + + def attach_classic_instance(self, instance_id, groups, dry_run=False): + """ + Links an EC2-Classic instance to a ClassicLink-enabled VPC through one + or more of the VPC's security groups. You cannot link an EC2-Classic + instance to more than one VPC at a time. You can only link an instance + that's in the running state. An instance is automatically unlinked from + a VPC when it's stopped. You can link it to the VPC again when you + restart it. + + After you've linked an instance, you cannot change the VPC security + groups that are associated with it. To change the security groups, you + must first unlink the instance, and then link it again. + + Linking your instance to a VPC is sometimes referred to as attaching + your instance. + + :type intance_id: str + :param instance_is: The ID of a ClassicLink-enabled VPC. + + :tye groups: list + :param groups: The ID of one or more of the VPC's security groups. 
+ You cannot specify security groups from a different VPC. The
+ members of the list can be
+ :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+ strings of the IDs of the security groups.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.attach_classic_link_vpc(
+ vpc_id=self.id,
+ instance_id=instance_id,
+ groups=groups,
+ dry_run=dry_run
+ )
+
+ def detach_classic_instance(self, instance_id, dry_run=False):
+ """
+ Unlinks a linked EC2-Classic instance from a VPC. After the instance
+ has been unlinked, the VPC security groups are no longer associated
+ with it. An instance is automatically unlinked from a VPC when
+ it's stopped.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance to unlink from the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.detach_classic_link_vpc(
+ vpc_id=self.id,
+ instance_id=instance_id,
+ dry_run=dry_run
+ )
diff -Nru python-boto-2.34.0/debian/changelog python-boto-2.38.0/debian/changelog
--- python-boto-2.34.0/debian/changelog 2015-07-09 15:16:15.000000000 +0000
+++ python-boto-2.38.0/debian/changelog 2015-07-09 15:16:15.000000000 +0000
@@ -1,3 +1,17 @@
+python-boto (2.38.0-1ubuntu1) wily; urgency=low
+
+ * Merge from Debian unstable. Remaining changes:
+ * d/tests/unit: Fix autopkgtest error in test_timeout.
+ * d/tests/unit: run tests/test.py with each python in 'pyversions -i'
+
+ -- James Page Thu, 09 Jul 2015 16:11:38 +0100
+
+python-boto (2.38.0-1) unstable; urgency=medium
+
+ * New upstream release.
+
+ -- Eric Evans Sun, 14 Jun 2015 09:43:42 -0500
+
 python-boto (2.34.0-2ubuntu1) vivid; urgency=low

 * Merge from Debian unstable. Remaining changes:
diff -Nru python-boto-2.34.0/docs/source/dynamodb2_tut.rst python-boto-2.38.0/docs/source/dynamodb2_tut.rst
--- python-boto-2.34.0/docs/source/dynamodb2_tut.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/dynamodb2_tut.rst 2015-04-09 18:57:51.000000000 +0000
@@ -37,7 +37,11 @@
--------------------

 To create a new table, you need to call ``Table.create`` & specify (at a
-minimum) both the table's name as well as the key schema for the table.
+minimum) both the table's name as well as the key schema for the table::
+
+ >>> from boto.dynamodb2.fields import HashKey
+ >>> from boto.dynamodb2.table import Table
+ >>> users = Table.create('users', schema=[HashKey('username')])

 Since both the key schema and local secondary indexes can not be modified
 after the table is created, you'll need to plan ahead of time how you
@@ -60,37 +64,34 @@
 duplicates only the keys from the schema onto the index. The ``IncludeIndex``
 lets you specify a list of fieldnames to duplicate over.

-Simple example::
-
- >>> from boto.dynamodb2.fields import HashKey
- >>> from boto.dynamodb2.table import Table
-
- # Uses your ``aws_access_key_id`` & ``aws_secret_access_key`` from either a
- # config file or environment variable & the default region.
- >>> users = Table.create('users', schema=[
- ... HashKey('username'),
- ...
])
-
 A full example::

 >>> import boto.dynamodb2
- >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, AllIndex
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, GlobalAllIndex
 >>> from boto.dynamodb2.table import Table
 >>> from boto.dynamodb2.types import NUMBER

+ # Uses your ``aws_access_key_id`` & ``aws_secret_access_key`` from either a
+ # config file or environment variable & the default region.
 >>> users = Table.create('users', schema=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... HashKey('username'), # defaults to STRING data_type
 ... RangeKey('last_name'),
 ... ], throughput={
 ... 'read': 5,
 ... 'write': 15,
- ... }, indexes=[
- ... AllIndex('EverythingIndex', parts=[
- ... HashKey('account_type', data_type=NUMBER),
- ... ])
+ ... }, global_indexes=[
+ ... GlobalAllIndex('EverythingIndex', parts=[
+ ... HashKey('account_type'),
+ ... ],
+ ... throughput={
+ ... 'read': 1,
+ ... 'write': 1,
+ ... })
 ... ],
- ... # If you need to specify custom parameters like keys or region info...
- ... connection= boto.dynamodb2.connect_to_region('us-east-1'))
+ ... # If you need to specify custom parameters, such as credentials or region,
+ ... # use the following:
+ ... # connection=boto.dynamodb2.connect_to_region('us-east-1')
+ ... )


Using an Existing Table
@@ -108,15 +109,15 @@

 Efficient example::

- >>> from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex
 >>> from boto.dynamodb2.table import Table
 >>> from boto.dynamodb2.types import NUMBER

 >>> users = Table('users', schema=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... HashKey('username'),
 ... RangeKey('last_name'),
- ... ], indexes=[
- ... AllIndex('EverythingIndex', parts=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... ], global_indexes=[
+ ... GlobalAllIndex('EverythingIndex', parts=[
+ ... HashKey('account_type'),
 ... ])
 ... ])

@@ -142,6 +143,7 @@
 ... 'username': 'johndoe',
 ... 'first_name': 'John',
 ... 'last_name': 'Doe',
+ ... 'account_type': 'standard_user',
 ... })
 True

@@ -156,13 +158,15 @@
 >>> users = Table('users')

 # WARNING - This doesn't save it yet!
- >>> johndoe = Item(users, data={
- ... 'username': 'johndoe',
- ... 'first_name': 'John',
+ >>> janedoe = Item(users, data={
+ ... 'username': 'janedoe',
+ ... 'first_name': 'Jane',
 ... 'last_name': 'Doe',
+ ... 'account_type': 'standard_user',
 ... })
+
 # The data now gets persisted to the server.
- >>> johndoe.save()
+ >>> janedoe.save()
 True


@@ -177,13 +181,11 @@
 >>> from boto.dynamodb2.table import Table
 >>> users = Table('users')

- >>> johndoe = users.get_item(username='johndoe')
+ >>> johndoe = users.get_item(username='johndoe', last_name='Doe')

 Once you have an ``Item`` instance, it presents a dictionary-like interface to
 the data.::

- >>> johndoe = users.get_item(username='johndoe')
-
 # Read a field out.
 >>> johndoe['first_name']
 'John'

 # Change a field (DOESN'T SAVE YET!).
 >>> johndoe['first_name'] = 'Johann'

 # Delete data from it (DOESN'T SAVE YET!).
- >>> del johndoe['last_name']
+ >>> del johndoe['account_type']


Updating an Item
@@ -207,10 +209,13 @@
 and, if so, will send all of the item's data.
If that expectation fails, the call will fail:: - >>> johndoe = users.get_item(username='johndoe') + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') >>> johndoe['first_name'] = 'Johann' >>> johndoe['whatever'] = "man, that's just like your opinion" - >>> del johndoe['last_name'] + >>> del johndoe['account_type'] # Affects all fields, even the ones not changed locally. >>> johndoe.save() @@ -219,10 +224,9 @@ The second is a full overwrite. If you can be confident your version of the data is the most correct, you can force an overwrite of the data.:: - >>> johndoe = users.get_item(username='johndoe') + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') >>> johndoe['first_name'] = 'Johann' - >>> johndoe['whatever'] = "man, that's just like your opinion" - >>> del johndoe['last_name'] + >>> johndoe['whatever'] = "Man, that's just like your opinion" # Specify ``overwrite=True`` to fully replace the data. >>> johndoe.save(overwrite=True) @@ -232,13 +236,13 @@ can send a partial update that only writes those fields, allowing other (potentially changed) fields to go untouched.:: - >>> johndoe = users.get_item(username='johndoe') + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') >>> johndoe['first_name'] = 'Johann' >>> johndoe['whatever'] = "man, that's just like your opinion" - >>> del johndoe['last_name'] + >>> del johndoe['account_type'] # Partial update, only sending/affecting the - # ``first_name/whatever/last_name`` fields. + # ``first_name/whatever/account_type`` fields. >>> johndoe.partial_save() True @@ -261,7 +265,7 @@ >>> from boto.dynamodb2.table import Table >>> users = Table('users') - >>> users.delete_item(username='johndoe') + >>> users.delete_item(username='johndoe', last_name='Doe') True @@ -276,6 +280,7 @@ The context manager imitates the ``Table.put_item`` & ``Table.delete_item`` APIs. Getting & using the context manager looks like:: + >>> import time >>> from boto.dynamodb2.table import Table >>> users = Table('users') @@ -287,11 +292,12 @@ ... 'date_joined': int(time.time()), ... }) ... batch.put_item(data={ - ... 'username': 'alice', - ... 'first_name': 'Alice', + ... 'username': 'joebloggs', + ... 'first_name': 'Joe', + ... 'last_name': 'Bloggs', ... 'date_joined': int(time.time()), ... }) - ... batch.delete_item(username=jane') + ... batch.delete_item(username='janedoe', last_name='Doe') However, there are some limitations on what you can do within the context manager. @@ -328,16 +334,59 @@ In terms of querying, our original schema is less than optimal. For the following examples, we'll be using the following table setup:: - >>> users = Table.create('users', schema=[ + >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex + >>> from boto.dynamodb2.table import Table + >>> from boto.dynamodb2.types import NUMBER + >>> import time + >>> users = Table.create('users2', schema=[ ... HashKey('account_type'), ... RangeKey('last_name'), - ... ], indexes=[ - ... AllIndex('DateJoinedIndex', parts=[ + ... ], throughput={ + ... 'read': 5, + ... 'write': 15, + ... }, global_indexes=[ + ... GlobalAllIndex('DateJoinedIndex', parts=[ ... HashKey('account_type'), ... RangeKey('date_joined', data_type=NUMBER), - ... ]), + ... ], + ... throughput={ + ... 'read': 1, + ... 'write': 1, + ... }), ... ]) +And the following data:: + + >>> with users.batch_write() as batch: + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 
'first_name': 'John', + ... 'last_name': 'Doe', + ... 'is_owner': True, + ... 'email': True, + ... 'date_joined': int(time.time()) - (60*60*2), + ... }) + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 'first_name': 'Jane', + ... 'last_name': 'Doering', + ... 'date_joined': int(time.time()) - 2, + ... }) + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 'first_name': 'Bob', + ... 'last_name': 'Doerr', + ... 'date_joined': int(time.time()) - (60*60*3), + ... }) + ... batch.put_item(data={ + ... 'account_type': 'super_user', + ... 'first_name': 'Alice', + ... 'last_name': 'Liddel', + ... 'is_owner': True, + ... 'email': True, + ... 'date_joined': int(time.time()) - 1, + ... }) + When executing the query, you get an iterable back that contains your results. These results may be spread over multiple requests as DynamoDB paginates them. This is done transparently, but you should be aware it may take more than one @@ -352,9 +401,9 @@ >>> for user in names_with_d: ... print user['first_name'] - 'Bob' - 'Jane' 'John' + 'Jane' + 'Bob' You can also reverse results (``reverse=True``) as well as limiting them (``limit=2``):: @@ -368,11 +417,11 @@ >>> for user in rev_with_d: ... print user['first_name'] - 'John' + 'Bob' 'Jane' You can also run queries against the local secondary indexes. Simply provide -the index name (``index='FirstNameIndex'``) & filter parameters against its +the index name (``index='DateJoinedIndex'``) & filter parameters against its fields:: # Users within the last hour. @@ -384,7 +433,6 @@ >>> for user in recent: ... print user['first_name'] - 'Alice' 'Jane' By default, DynamoDB can return a large amount of data per-request (up to 1Mb @@ -397,13 +445,15 @@ >>> all_users = users.query_2( ... account_type__eq='standard_user', ... date_joined__gte=0, + ... index='DateJoinedIndex', ... max_page_size=10 ... ) # Usage is the same, but now many smaller requests are done. - >>> for user in recent: + >>> for user in all_users: ... print user['first_name'] - 'Alice' + 'Bob' + 'John' 'Jane' Finally, if you need to query on data that's not in either a key or in an @@ -413,8 +463,9 @@ .. warning:: - Scans are consistent & run over the entire table, so relatively speaking, - they're more expensive than plain queries or queries against an LSI. + Scans are eventually consistent & run over the entire table, so + relatively speaking, they're more expensive than plain queries or queries + against an LSI. An example scan of all records in the table looks like:: @@ -423,14 +474,14 @@ Filtering a scan looks like:: >>> owners_with_emails = users.scan( - ... is_owner__eq=1, + ... is_owner__eq=True, ... email__null=False, ... ) - >>> for user in recent: + >>> for user in owners_with_emails: ... print user['first_name'] - 'George' 'John' + 'Alice' The ``ResultSet`` @@ -445,12 +496,16 @@ >>> result_set = users.scan() >>> for user in result_set: ... print user['first_name'] + 'John' + 'Jane' + 'Bob' + 'Alice' However, this throws away results as it fetches more data. As a result, you -can't index it like a ``list``. +can't index it like a ``list``:: >>> len(result_set) - 0 + TypeError: object of type 'ResultSet' has no len() Because it does this, if you need to loop over your results more than once (or do things like negative indexing, length checks, etc.), you should wrap it in @@ -461,6 +516,8 @@ # Slice it for every other user. >>> for user in all_users[::2]: ... print user['first_name'] + 'John' + 'Bob' .. 
warning::

@@ -585,25 +642,21 @@

 Example::

 >>> from boto.dynamodb2.table import Table
- >>> users = Table('users')
+ >>> users = Table('users2')

 # No request yet.
 >>> many_users = users.batch_get(keys=[
- {'username': 'alice'},
- {'username': 'bob'},
- {'username': 'fred'},
- {'username': 'jane'},
- {'username': 'johndoe'},
- ])
+ ... {'account_type': 'standard_user', 'last_name': 'Doe'},
+ ... {'account_type': 'standard_user', 'last_name': 'Doering'},
+ ... {'account_type': 'super_user', 'last_name': 'Liddel'},
+ ... ])

 # Now the request is performed, requesting all of them in one request.
 >>> for user in many_users:
 ... print user['first_name']
 'Alice'
- 'Bobby'
- 'Fred'
- 'Jane'
 'John'
+ 'Jane'


Deleting a Table
@@ -629,6 +682,7 @@
 conn = DynamoDBConnection(
 host='localhost',
 port=8000,
+ aws_access_key_id='anything',
 aws_secret_access_key='anything',
 is_secure=False)
diff -Nru python-boto-2.34.0/docs/source/index.rst python-boto-2.38.0/docs/source/index.rst
--- python-boto-2.34.0/docs/source/index.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/index.rst 2015-04-09 18:57:51.000000000 +0000
@@ -36,6 +36,8 @@
 * :doc:`Elastic MapReduce (EMR) <emr_tut>` -- (:doc:`API Reference <ref/emr>`) (Python 3)
 * :doc:`Auto Scaling <autoscale_tut>` -- (:doc:`API Reference <ref/autoscale>`) (Python 3)
 * Kinesis -- (:doc:`API Reference <ref/kinesis>`) (Python 3)
+ * Lambda -- (:doc:`API Reference <ref/awslamba>`) (Python 3)
+ * EC2 Container Service (ECS) -- (:doc:`API Reference <ref/ec2containerservice>`) (Python 3)

* **Content Delivery**

@@ -58,16 +60,21 @@
 * Data Pipeline -- (:doc:`API Reference <ref/datapipeline>`) (Python 3)
 * Opsworks -- (:doc:`API Reference <ref/opsworks>`) (Python 3)
 * CloudTrail -- (:doc:`API Reference <ref/cloudtrail>`) (Python 3)
+ * CodeDeploy -- (:doc:`API Reference <ref/codedeploy>`) (Python 3)

-* **Identity & Access**
+* **Administration & Security**

 * Identity and Access Management (IAM) -- (:doc:`API Reference <ref/iam>`) (Python 3)
 * Security Token Service (STS) -- (:doc:`API Reference <ref/sts>`) (Python 3)
+ * Key Management Service (KMS) -- (:doc:`API Reference <ref/kms>`) (Python 3)
+ * Config -- (:doc:`API Reference <ref/configservice>`) (Python 3)
+ * CloudHSM -- (:doc:`API Reference <ref/cloudhsm>`) (Python 3)

* **Application Services**

 * Cloudsearch 2 -- (:doc:`API Reference <ref/cloudsearch2>`) (Python 3)
 * :doc:`Cloudsearch <cloudsearch_tut>` -- (:doc:`API Reference <ref/cloudsearch>`) (Python 3)
+ * CloudSearch Domain -- (:doc:`API Reference <ref/cloudsearchdomain>`) (Python 3)
 * Elastic Transcoder -- (:doc:`API Reference <ref/elastictranscoder>`) (Python 3)
 * :doc:`Simple Workflow Service (SWF) <swf_tut>` -- (:doc:`API Reference <ref/swf>`) (Python 3)
 * :doc:`Simple Queue Service (SQS) <sqs_tut>` -- (:doc:`API Reference <ref/sqs>`) (Python 3)
@@ -75,6 +82,7 @@
 * :doc:`Simple Email Service (SES) <ses_tut>` -- (:doc:`API Reference <ref/ses>`) (Python 3)
 * Amazon Cognito Identity -- (:doc:`API Reference <ref/cognito-identity>`) (Python 3)
 * Amazon Cognito Sync -- (:doc:`API Reference <ref/cognito-sync>`) (Python 3)
+ * Amazon Machine Learning -- (:doc:`API Reference <ref/machinelearning>`) (Python 3)

* **Monitoring**

@@ -135,6 +143,11 @@
.. toctree::
 :titlesonly:

+ releasenotes/v2.37.0
+ releasenotes/v2.36.0
+ releasenotes/v2.35.2
+ releasenotes/v2.35.1
+ releasenotes/v2.35.0
 releasenotes/v2.34.0
 releasenotes/v2.33.0
 releasenotes/v2.32.1
diff -Nru python-boto-2.34.0/docs/source/ref/awslamba.rst python-boto-2.38.0/docs/source/ref/awslamba.rst
--- python-boto-2.34.0/docs/source/ref/awslamba.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/awslamba.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-awslambda
+
+==========
+AWS Lambda
+==========
+
+boto.awslambda
+--------------
+
+.. automodule:: boto.awslambda
+ :members:
+ :undoc-members:
+
+boto.awslambda.layer1
+---------------------
+
+..
automodule:: boto.awslambda.layer1
+ :members:
+ :undoc-members:
+
+boto.awslambda.exceptions
+-------------------------
+
+.. automodule:: boto.awslambda.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/cloudhsm.rst python-boto-2.38.0/docs/source/ref/cloudhsm.rst
--- python-boto-2.34.0/docs/source/ref/cloudhsm.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/cloudhsm.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-cloudhsm
+
+========
+CloudHSM
+========
+
+boto.cloudhsm
+-------------
+
+.. automodule:: boto.cloudhsm
+ :members:
+ :undoc-members:
+
+boto.cloudhsm.layer1
+--------------------
+
+.. automodule:: boto.cloudhsm.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudhsm.exceptions
+------------------------
+
+.. automodule:: boto.cloudhsm.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/cloudsearchdomain.rst python-boto-2.38.0/docs/source/ref/cloudsearchdomain.rst
--- python-boto-2.34.0/docs/source/ref/cloudsearchdomain.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/cloudsearchdomain.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-cloudsearchdomain
+
+==================
+CloudSearch Domain
+==================
+
+boto.cloudsearchdomain
+----------------------
+
+.. automodule:: boto.cloudsearchdomain
+ :members:
+ :undoc-members:
+
+boto.cloudsearchdomain.layer1
+-----------------------------
+
+.. automodule:: boto.cloudsearchdomain.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudsearchdomain.exceptions
+---------------------------------
+
+.. automodule:: boto.cloudsearchdomain.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/codedeploy.rst python-boto-2.38.0/docs/source/ref/codedeploy.rst
--- python-boto-2.34.0/docs/source/ref/codedeploy.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/codedeploy.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-codedeploy
+
+==========
+CodeDeploy
+==========
+
+boto.codedeploy
+---------------
+
+.. automodule:: boto.codedeploy
+ :members:
+ :undoc-members:
+
+boto.codedeploy.layer1
+----------------------
+
+.. automodule:: boto.codedeploy.layer1
+ :members:
+ :undoc-members:
+
+boto.codedeploy.exceptions
+--------------------------
+
+.. automodule:: boto.codedeploy.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/configservice.rst python-boto-2.38.0/docs/source/ref/configservice.rst
--- python-boto-2.34.0/docs/source/ref/configservice.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/configservice.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-configservice
+
+======
+Config
+======
+
+boto.configservice
+------------------
+
+.. automodule:: boto.configservice
+ :members:
+ :undoc-members:
+
+boto.configservice.layer1
+-------------------------
+
+.. automodule:: boto.configservice.layer1
+ :members:
+ :undoc-members:
+
+boto.configservice.exceptions
+-----------------------------
+
+.. automodule:: boto.configservice.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/ec2containerservice.rst python-boto-2.38.0/docs/source/ref/ec2containerservice.rst
--- python-boto-2.34.0/docs/source/ref/ec2containerservice.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/ec2containerservice.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+..
ref-ec2containerservice
+
+=====================
+EC2 Container Service
+=====================
+
+boto.ec2containerservice
+------------------------
+
+.. automodule:: boto.ec2containerservice
+ :members:
+ :undoc-members:
+
+boto.ec2containerservice.layer1
+-------------------------------
+
+.. automodule:: boto.ec2containerservice.layer1
+ :members:
+ :undoc-members:
+
+boto.ec2containerservice.exceptions
+-----------------------------------
+
+.. automodule:: boto.ec2containerservice.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/elb.rst python-boto-2.38.0/docs/source/ref/elb.rst
--- python-boto-2.34.0/docs/source/ref/elb.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/elb.rst 2015-04-09 18:57:51.000000000 +0000
@@ -47,15 +47,21 @@
 :undoc-members:

boto.ec2.elb.policies
-------------------------
+---------------------

.. automodule:: boto.ec2.elb.policies
 :members:
 :undoc-members:

boto.ec2.elb.securitygroup
-------------------------
+--------------------------

.. automodule:: boto.ec2.elb.securitygroup
 :members:
 :undoc-members:
+
+boto.ec2.elb.attributes
+-----------------------
+.. automodule:: boto.ec2.elb.attributes
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/kms.rst python-boto-2.38.0/docs/source/ref/kms.rst
--- python-boto-2.34.0/docs/source/ref/kms.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/kms.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-kms
+
+===
+KMS
+===
+
+boto.kms
+--------
+
+.. automodule:: boto.kms
+ :members:
+ :undoc-members:
+
+boto.kms.layer1
+---------------
+
+.. automodule:: boto.kms.layer1
+ :members:
+ :undoc-members:
+
+boto.kms.exceptions
+-------------------
+
+.. automodule:: boto.kms.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/ref/machinelearning.rst python-boto-2.38.0/docs/source/ref/machinelearning.rst
--- python-boto-2.34.0/docs/source/ref/machinelearning.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/ref/machinelearning.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,26 @@
+.. ref-machinelearning
+
+================
+Machine Learning
+================
+
+boto.machinelearning
+--------------------
+
+.. automodule:: boto.machinelearning
+ :members:
+ :undoc-members:
+
+boto.machinelearning.layer1
+---------------------------
+
+.. automodule:: boto.machinelearning.layer1
+ :members:
+ :undoc-members:
+
+boto.machinelearning.exceptions
+-------------------------------
+
+.. automodule:: boto.machinelearning.exceptions
+ :members:
+ :undoc-members:
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.35.0.rst python-boto-2.38.0/docs/source/releasenotes/v2.35.0.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.35.0.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.35.0.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,55 @@
+boto v2.35.0
+============
+
+:date: 2015/01/08
+
+This release adds support for Amazon EC2 Classic Link which allows users
+to link classic instances to Classic Link enabled VPCs,
+adds support for Amazon CloudSearch Domain, adds sigv4 support
+for Elastic Load Balancing, and fixes several other issues including issues
+making anonymous AWS Security Token Service requests.
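
The anonymous STS behavior mentioned above is easiest to see in use. The
following is an editorial sketch, not part of the upstream diff; it assumes
a valid role ARN and a web identity token obtained from an external identity
provider (both shown here as placeholders)::

    from boto.sts import STSConnection

    # anon=True makes STSConnection skip the credential search entirely
    # (see the boto/sts/connection.py hunk earlier in this diff) and use
    # the anonymous 'sts-anon' auth handler, which url-encodes the query
    # string into the POST body instead of signing the request.
    sts = STSConnection(anon=True)
    assumed = sts.assume_role_with_web_identity(
        role_arn='arn:aws:iam::123456789012:role/WebRole',  # placeholder ARN
        role_session_name='web-session',
        web_identity_token='TOKEN_FROM_PROVIDER',  # placeholder token
    )
    print(assumed.credentials.access_key)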
+
+
+Changes
+-------
+* Add Amazon EC2 Classic Link support (:sha:`5dbd2d7`)
+* Add query string to body for anon STS POST (:issue:`2812`, :sha:`6513789`)
+* Fix bug that prevented initializing a dynamo item from existing item (:issue:`2764`, :sha:`743e814`)
+* switchover-sigv4: Add integ tests for sigv4 switchover; switch elb/ec2 over to signature version 4 (:sha:`0dadce8`)
+* Return SetStackPolicyResponse - (:issue:`2822`, :issue:`2346`, :issue:`2639`, :sha:`c4defb4`)
+* Added ELB Attributes to docs. (:issue:`2821`, :sha:`5dfeba9`)
+* Fix bug by using correct string joining syntax. (:issue:`2817`, :sha:`8426148`)
+* Fix SES get_identity_dkim_attributes when input length > 1. (:issue:`2810`, :sha:`cc4d42d`)
+* DynamoDB table batch_get fails to process all remaining results if single batch result is empty. (:issue:`2809`, :sha:`a193bc0`)
+* Added support for additional fields in EMR objects. (:issue:`2807`, :sha:`2936ac0`)
+* Pass version_id in copy if key is versioned. (:issue:`2803`, :sha:`66b3604`)
+* Add support for SQS PurgeQueue operation. (:issue:`2806`, :sha:`90a5d44`)
+* Update documentation for launchconfig. (:issue:`2802`, :sha:`0dc8412`)
+* Remove unimplemented config param. (:issue:`2801`, :issue:`2572`, :sha:`f1a5ebd`)
+* Add support for private hosted zones. (:issue:`2785`, :sha:`2e7829b`)
+* Fix Key.change_storage_class so that it obeys dst_bucket. (:issue:`2752`, :sha:`55ed184`)
+* Fix for s3put host specification. (:issue:`2736`, :issue:`2522`, :sha:`1af31f2`)
+* Improve handling of Glacier HTTP 204 responses. (:issue:`2726`, :sha:`c314298`)
+* Fix raising exception syntax in Python 3. (:issue:`2735`, :issue:`2563`, :sha:`58f76f6`)
+* Privatezone: Adding unit/integration test coverage (:issue:`1`, :sha:`d1ff14e`)
+* Minor documentation/pep8 fixes. (:issue:`2753`, :sha:`6a853be`)
+* Correct argument type in doc string. (:issue:`2728`, :sha:`1ddf6df`)
+* Use exclusive start key to get all items from DynamoDB query. (:issue:`2676`, :issue:`2573`, :sha:`419d8a5`)
+* Updated link to current config documentation. (:issue:`2755`, :sha:`9be3f85`)
+* Fix the SQS certificate error for region cn-north-1. (:issue:`2766`, :sha:`1d5368a`)
+* Adds support for getting health checker IP ranges from Route53. (:issue:`2792`, :sha:`ee14911`)
+* fix: snap.create_volume documentation lists general purpose ssd. Fixes @2774. (:issue:`2774`, :sha:`36fae2b`)
+* Fixed param type in get_contents_to_filename docstring. (:issue:`2783`, :sha:`478f66a`)
+* Update DynamoDB local example to include fake access key id. (:issue:`2791`, :sha:`2c1f8d5`)
+* Added 'end' attribute to ReservedInstance. (:issue:`2793`, :issue:`2757`, :sha:`28814d8`)
+* Parse ClusterStatus’s StateChangeReason. (:issue:`2696`, :sha:`48c5d17`)
+* Adds SupportedProducts field to EMR JobFlow objects. (:issue:`2775`, :sha:`6771d04`)
+* Fix EMR endpoint. (:issue:`2750`, :sha:`8329e02`)
+* Detect old-style S3 URL for auto-sigv4.
(:issue:`2773`, :sha:`f5be409`)
+* Throw host warning for cloudsearch domain (:issue:`2765`, :sha:`9af6f41`)
+* Fix CloudSearch2 to work with IAM-based search and upload requests (:issue:`2717`, :sha:`9f4fe8b`)
+* iam: add support for Account Password Policy APIs (:issue:`2574`, :sha:`6c9bd53`)
+* Handle sigv4 non-string header values properly (:issue:`2744`, :sha:`e043e4b`)
+* Url encode query string for pure query (:issue:`2720`, :sha:`bbbf9d2`)
+
+
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.35.1.rst python-boto-2.38.0/docs/source/releasenotes/v2.35.1.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.35.1.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.35.1.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,14 @@
+boto v2.35.1
+============
+
+:date: 2015/01/09
+
+This release fixes a regression which results in an infinite while loop of
+requests if you query an empty Amazon DynamoDB table.
+
+
+Changes
+-------
+* Check for results left after computing self._keys_left (:issue:`2871`, :sha:`d3c2595`)
+
+
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.35.2.rst python-boto-2.38.0/docs/source/releasenotes/v2.35.2.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.35.2.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.35.2.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,16 @@
+boto v2.35.2
+============
+
+:date: 2015/01/19
+
+This release adds ClassicLink support for Auto Scaling and fixes a few issues.
+
+
+Changes
+-------
+* Add support for new data types in DynamoDB. (:issue:`2667`, :sha:`68ad513`)
+* Expose cloudformation `UsePreviousTemplate` parameter. (:issue:`2843`, :issue:`2628`, :sha:`873e89c`)
+* Fix documentation around using custom connections for DynamoDB tables. (:issue:`2842`, :issue:`1585`, :sha:`71d677f`)
+* Fixed bug that made it impossible to call query_2 after calling the describe method on a dynamodb2 table. (:issue:`2829`, :sha:`66addce`)
+
+
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.36.0.rst python-boto-2.38.0/docs/source/releasenotes/v2.36.0.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.36.0.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.36.0.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,27 @@
+boto v2.36.0
+============
+
+:date: 2015/01/27
+
+This release adds support for AWS Key Management Service (KMS), AWS Lambda,
+AWS CodeDeploy, AWS Config, AWS CloudHSM, Amazon EC2 Container Service (ECS),
+Amazon DynamoDB online indexing, and fixes a few issues.
+
+
+Changes
+-------
+* Add Amazon DynamoDB online indexing support.
+* Allow for binary to be passed to sqs message (:issue:`2913`, :sha:`8af9b42`)
+* Kinesis update (:issue:`2891`, :sha:`4874e19`)
+* Fixed spelling of boto.awslambda package. (:issue:`2914`, :sha:`de769ac`)
+* Add support for Amazon EC2 Container Service (:issue:`2908`, :sha:`4480fb4`)
+* Add support for CloudHSM (:issue:`2905`, :sha:`6055a35`)
+* Add support for AWS Config (:issue:`2904`, :sha:`51e9221`)
+* Add support for AWS CodeDeploy (:issue:`2899`, :sha:`d935356`)
+* Add support for AWS Lambda (:issue:`2896`, :sha:`6748016`)
+* Update both Cognito's to the latest APIs (:issue:`2909`, :sha:`18c1251`)
+* Add sts for eu-central-1.
+* Update opsworks to the latest API (:issue:`2892`, :sha:`aed3302`)
+* Add AWS Key Management Support (:issue:`2894`, :sha:`ef7d2cd`)
+
+
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.37.0.rst python-boto-2.38.0/docs/source/releasenotes/v2.37.0.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.37.0.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.37.0.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,39 @@
+boto v2.37.0
+============
+
+:date: 2015/04/02
+
+This release updates AWS CloudTrail to the latest API to support the
+``LookupEvents`` operation, adds new regional service endpoints, and fixes
+bugs in several services.
+
+.. note::
+
+   The CloudTrail ``create_trail`` operation no longer supports the deprecated
+   ``trail`` parameter, which has been marked for removal by the service
+   since early 2014. Instead, each trail parameter is now passed as a
+   keyword argument (a short sketch of a ported call appears below).
+   Please see the
+   `reference `__
+   to help port over existing code.
+
+
+Changes
+-------
+* Update AWS CloudTrail to the latest API. (:issue:`3074`, :sha:`bccc29a`)
+* Add support for UsePreviousValue to CloudFormation UpdateStack. (:issue:`3029`, :sha:`8a8a22a`)
+* Fix BOTH_PATH to work with Windows drives (:issue:`2823`, :sha:`7ba973e`)
+* Fix division calculation in S3 docs. (:issue:`3018`, :sha:`4ffd9ba`)
+* Add Boto 3 link in README. (:issue:`3013`, :sha:`561716c`)
+* Add more regions for configservice (:issue:`3009`, :sha:`a82244f`)
+* Add ``eu-central-1`` endpoints (Frankfurt region) for IAM and Route53 (:sha:`5ff4add`)
+* Fix unit tests that were hanging (:sha:`da9f9b7`)
+* Fixed wording in dynamodb tutorial (:issue:`2993`, :sha:`36cadf4`)
+* Update SWF objects to keep a consistent region name. (:issue:`2985`, :issue:`2980`, :issue:`2606`, :sha:`ce75a19`)
+* Print archive ID in glacier upload script. (:issue:`2951`, :sha:`047c7d3`)
+* Add some minor documentation for Route53 tutorial. (:issue:`2952`, :sha:`b855fb3`)
+* Add Amazon DynamoDB online indexing support to the high-level API (:issue:`2925`, :sha:`0621c53`)
+* Ensure Content-Length header is a string. (:issue:`2932`, :sha:`34a0f63`)
+* Correct docs around overriding SGs on ELBs (:issue:`2937`, :sha:`84d0ff9`)
+* Fix DynamoDB tests. (:sha:`616ee80`)
+* Fix region bug. (:issue:`2927`, :sha:`b1cb61e`)
+* Fix import for ``boto.cloudhsm.layer1.CloudHSMConnection``. (:issue:`2926`, :sha:`1944d35`)
diff -Nru python-boto-2.34.0/docs/source/releasenotes/v2.38.0.rst python-boto-2.38.0/docs/source/releasenotes/v2.38.0.rst
--- python-boto-2.34.0/docs/source/releasenotes/v2.38.0.rst 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/docs/source/releasenotes/v2.38.0.rst 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,13 @@
+boto v2.38.0
+============
+
+:date: 2015/04/09
+
+This release adds support for Amazon Machine Learning and fixes a couple of
+issues.
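+
+As a quick illustration of the ``create_trail`` change described in the
+v2.37.0 note above, a ported call might look like the sketch below. The
+trail name, bucket, and region here are hypothetical and error handling is
+omitted::
+
+    import boto.cloudtrail
+
+    conn = boto.cloudtrail.connect_to_region('us-east-1')
+    # Old style (no longer supported):
+    #   conn.create_trail(trail={'Name': 'my-trail',
+    #                            'S3BucketName': 'my-trail-bucket'})
+    # New style -- each trail attribute is its own keyword argument:
+    conn.create_trail(name='my-trail', s3_bucket_name='my-trail-bucket')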
+
+
+Changes
+-------
+* Add support for Amazon Machine Learning (:sha:`ab32d572`)
+* Fix issue with modifying the instance type of reserved instances (:issue:`3085`, :sha:`b8ea7a04`)
diff -Nru python-boto-2.34.0/docs/source/route53_tut.rst python-boto-2.38.0/docs/source/route53_tut.rst
--- python-boto-2.34.0/docs/source/route53_tut.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/route53_tut.rst 2015-04-09 18:57:51.000000000 +0000
@@ -85,3 +85,19 @@
 When the status has changed to *INSYNC*, the change has been propagated to
 remote servers
 
+Working with Change Sets
+------------------------
+
+You can also do bulk updates using ResourceRecordSets. For example, updating the TTL:
+
+>>> zone = conn.get_zone('example.com')
+>>> change_set = boto.route53.record.ResourceRecordSets(conn, zone.id)
+>>> for rrset in conn.get_all_rrsets(zone.id):
+...     u = change_set.add_change("UPSERT", rrset.name, rrset.type, ttl=3600)
+...     u.add_value(rrset.resource_records[0])
+>>> results = change_set.commit()
+Done
+
+In this example we update the TTL to 1hr (3600 seconds) for all records
+returned for example.com.
+Note: this will also change the SOA and NS records, which may not be
+desirable for many users.
diff -Nru python-boto-2.34.0/docs/source/s3_tut.rst python-boto-2.38.0/docs/source/s3_tut.rst
--- python-boto-2.34.0/docs/source/s3_tut.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/s3_tut.rst 2015-04-09 18:57:51.000000000 +0000
@@ -190,12 +190,12 @@
 
     # Use a chunk size of 50 MiB (feel free to change this)
     >>> chunk_size = 52428800
-    >>> chunk_count = int(math.ceil(source_size / chunk_size))
+    >>> chunk_count = int(math.ceil(source_size / float(chunk_size)))
 
     # Send the file parts, using FileChunkIO to create a file-like object
     # that points to a certain byte range within the original file. We
     # set bytes to never exceed the original file size.
-    >>> for i in range(chunk_count + 1):
+    >>> for i in range(chunk_count):
     >>>     offset = chunk_size * i
     >>>     bytes = min(chunk_size, source_size - offset)
     >>>     with FileChunkIO(source_path, 'r', offset=offset,
diff -Nru python-boto-2.34.0/docs/source/sqs_tut.rst python-boto-2.38.0/docs/source/sqs_tut.rst
--- python-boto-2.34.0/docs/source/sqs_tut.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/docs/source/sqs_tut.rst 2015-04-09 18:57:51.000000000 +0000
@@ -262,12 +262,9 @@
 >>> q.count()
 10
 
-This can be handy but this command as well as the other two utility methods
-I'll describe in a minute are inefficient and should be used with caution
-on queues with lots of messages (e.g. many hundreds or more). Similarly,
-you can clear (delete) all messages in a queue with:
+Removing all messages in a queue is as simple as calling purge:
 
->>> q.clear()
+>>> q.purge()
 
 Be REAL careful with that one! Finally, if you want to dump all of the
 messages in a queue to a local file:
diff -Nru python-boto-2.34.0/README.rst python-boto-2.38.0/README.rst
--- python-boto-2.34.0/README.rst 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/README.rst 2015-04-09 18:57:51.000000000 +0000
@@ -1,9 +1,9 @@
 ####
 boto
 ####
 
-boto 2.34.0
+boto 2.38.0
 
-Released: 23-Oct-2014
+Released: 9-Apr-2015
 
 .. image:: https://travis-ci.org/boto/boto.svg?branch=develop
         :target: https://travis-ci.org/boto/boto
 
@@ -26,6 +26,22 @@
 others know about your work in progress. Tests **must** pass on
 Python 2.6, 2.7, 3.3, and 3.4 for pull requests to be accepted.
+****** +Boto 3 +****** +The next major version of Boto is currently in developer preview and can +be found in the `Boto 3 `__ +repository and installed via ``pip``. It supports the latest service APIs +and provides a high-level object-oriented interface to many services. + +Please try Boto 3 and +`leave feedback `__ with any issues, +suggestions, and feature requests you might have. + +******** +Services +******** + At the moment, boto supports: * Compute @@ -34,6 +50,8 @@ * Amazon Elastic Map Reduce (EMR) (Python 3) * AutoScaling (Python 3) * Amazon Kinesis (Python 3) + * AWS Lambda (Python 3) + * Amazon EC2 Container Service (Python 3) * Content Delivery @@ -54,14 +72,19 @@ * AWS Data Pipeline (Python 3) * AWS Opsworks (Python 3) * AWS CloudTrail (Python 3) + * AWS CodeDeploy (Python 3) -* Identity & Access +* Administration & Security * AWS Identity and Access Management (IAM) (Python 3) + * AWS Key Management Service (KMS) (Python 3) + * AWS Config (Python 3) + * AWS CloudHSM (Python 3) * Application Services * Amazon CloudSearch (Python 3) + * Amazon CloudSearch Domain (Python 3) * Amazon Elastic Transcoder (Python 3) * Amazon Simple Workflow Service (SWF) (Python 3) * Amazon Simple Queue Service (SQS) (Python 3) @@ -69,6 +92,7 @@ * Amazon Simple Email Service (SES) (Python 3) * Amazon Cognito Identity (Python 3) * Amazon Cognito Sync (Python 3) + * Amazon Machine Learning (Python 3) * Monitoring @@ -179,7 +203,7 @@ .. _github.com: http://github.com/boto/boto .. _Online documentation: http://docs.pythonboto.org .. _Python Cheese Shop: http://pypi.python.org/pypi/boto -.. _this: http://code.google.com/p/boto/wiki/BotoConfig +.. _this: http://docs.pythonboto.org/en/latest/boto_config_tut.html .. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/ .. _neo: https://github.com/boto/boto/tree/neo .. _boto-users Google Group: https://groups.google.com/forum/?fromgroups#!forum/boto-users diff -Nru python-boto-2.34.0/requirements.txt python-boto-2.38.0/requirements.txt --- python-boto-2.34.0/requirements.txt 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/requirements.txt 2015-04-09 18:57:51.000000000 +0000 @@ -3,7 +3,7 @@ rsa==3.1.4 simplejson==3.5.2 argparse==1.2.1 -httpretty>=0.7.0 +httpretty>=0.7.0,<=0.8.6 paramiko>=1.10.0 PyYAML>=3.10 coverage==3.7.1 diff -Nru python-boto-2.34.0/setup.py python-boto-2.38.0/setup.py --- python-boto-2.34.0/setup.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/setup.py 2015-04-09 18:57:51.000000000 +0000 @@ -78,7 +78,11 @@ "boto.directconnect", "boto.kinesis", "boto.rds2", "boto.cloudsearch2", "boto.logs", "boto.vendored", "boto.route53.domains", "boto.cognito", - "boto.cognito.identity", "boto.cognito.sync"], + "boto.cognito.identity", "boto.cognito.sync", + "boto.cloudsearchdomain", "boto.kms", + "boto.awslambda", "boto.codedeploy", "boto.configservice", + "boto.cloudhsm", "boto.ec2containerservice", + "boto.machinelearning"], package_data = { "boto.cacerts": ["cacerts.txt"], "boto": ["endpoints.json"], diff -Nru python-boto-2.34.0/tests/integration/awslambda/__init__.py python-boto-2.38.0/tests/integration/awslambda/__init__.py --- python-boto-2.34.0/tests/integration/awslambda/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/awslambda/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/integration/awslambda/test_awslambda.py python-boto-2.38.0/tests/integration/awslambda/test_awslambda.py --- python-boto-2.34.0/tests/integration/awslambda/test_awslambda.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/awslambda/test_awslambda.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from boto.awslambda.exceptions import ResourceNotFoundException +from tests.compat import unittest + + +class TestAWSLambda(unittest.TestCase): + def setUp(self): + self.awslambda = boto.connect_awslambda() + + def test_list_functions(self): + response = self.awslambda.list_functions() + self.assertIn('Functions', response) + + def test_resource_not_found_exceptions(self): + with self.assertRaises(ResourceNotFoundException): + self.awslambda.get_function(function_name='non-existant-function') diff -Nru python-boto-2.34.0/tests/integration/cloudformation/test_connection.py python-boto-2.38.0/tests/integration/cloudformation/test_connection.py --- python-boto-2.34.0/tests/integration/cloudformation/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/cloudformation/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -10,6 +10,14 @@ "AWSTemplateFormatVersion": "2010-09-09", "Description": "AWS CloudFormation Sample Template EC2InstanceSample", "Parameters": { + "Parameter1": { + "Description": "Test Parameter 1", + "Type": "String" + }, + "Parameter2": { + "Description": "Test Parameter 2", + "Type": "String" + } }, "Mappings": { "RegionMap": { @@ -32,7 +40,14 @@ ] }, "UserData": { - "Fn::Base64": "a" * 15000 + "Fn::Base64": { + "Fn::Join":[ + "", + [{"Ref": "Parameter1"}, + {"Ref": "Parameter2"}] + ] + } + } } } @@ -102,7 +117,9 @@ # See https://github.com/boto/boto/issues/1037 body = self.connection.create_stack( self.stack_name, - template_body=json.dumps(BASIC_EC2_TEMPLATE)) + template_body=json.dumps(BASIC_EC2_TEMPLATE), + parameters=[('Parameter1', 'initial_value'), + ('Parameter2', 'initial_value')]) self.addCleanup(self.connection.delete_stack, self.stack_name) # A newly created stack should have events @@ -114,9 +131,39 @@ self.assertEqual(None, policy) # Our new stack should show up in the stack list - stacks = self.connection.describe_stacks() - self.assertEqual(self.stack_name, stacks[0].stack_name) - - + stacks = self.connection.describe_stacks(self.stack_name) + stack = stacks[0] + self.assertEqual(self.stack_name, stack.stack_name) + + params = [(p.key, p.value) for p in stack.parameters] + self.assertEquals([('Parameter1', 'initial_value'), + ('Parameter2', 'initial_value')], params) + + for _ in range(30): + stack.update() + if stack.stack_status.find("PROGRESS") == -1: + break + time.sleep(5) + + body = self.connection.update_stack( + self.stack_name, + template_body=json.dumps(BASIC_EC2_TEMPLATE), + parameters=[('Parameter1', '', True), + ('Parameter2', 'updated_value')]) + + stacks = self.connection.describe_stacks(self.stack_name) + stack = stacks[0] + params = [(p.key, p.value) for p in stacks[0].parameters] + self.assertEquals([('Parameter1', 'initial_value'), + ('Parameter2', 'updated_value')], params) + + # Waiting for the update to complete to unblock the delete_stack in the + # cleanup. + for _ in range(30): + stack.update() + if stack.stack_status.find("PROGRESS") == -1: + break + time.sleep(5) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.34.0/tests/integration/cloudhsm/__init__.py python-boto-2.38.0/tests/integration/cloudhsm/__init__.py --- python-boto-2.34.0/tests/integration/cloudhsm/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/cloudhsm/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/integration/cloudhsm/test_cloudhsm.py python-boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py --- python-boto-2.34.0/tests/integration/cloudhsm/test_cloudhsm.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from tests.compat import unittest +from boto.cloudhsm.exceptions import InvalidRequestException + + +class TestCloudHSM(unittest.TestCase): + def setUp(self): + self.cloudhsm = boto.connect_cloudhsm() + + def test_hapgs(self): + label = 'my-hapg' + response = self.cloudhsm.create_hapg(label=label) + hapg_arn = response['HapgArn'] + self.addCleanup(self.cloudhsm.delete_hapg, hapg_arn) + + response = self.cloudhsm.list_hapgs() + self.assertIn(hapg_arn, response['HapgList']) + + def test_validation_exception(self): + invalid_arn = 'arn:aws:cloudhsm:us-east-1:123456789012:hapg-55214b8d' + with self.assertRaises(InvalidRequestException): + self.cloudhsm.describe_hapg(invalid_arn) diff -Nru python-boto-2.34.0/tests/integration/codedeploy/__init__.py python-boto-2.38.0/tests/integration/codedeploy/__init__.py --- python-boto-2.34.0/tests/integration/codedeploy/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/codedeploy/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/integration/codedeploy/test_codedeploy.py python-boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py --- python-boto-2.34.0/tests/integration/codedeploy/test_codedeploy.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.codedeploy.exceptions import ApplicationDoesNotExistException +from tests.compat import unittest + + +class TestCodeDeploy(unittest.TestCase): + def setUp(self): + self.codedeploy = boto.connect_codedeploy() + + def test_applications(self): + application_name = 'my-boto-application' + self.codedeploy.create_application(application_name=application_name) + self.addCleanup(self.codedeploy.delete_application, application_name) + response = self.codedeploy.list_applications() + self.assertIn(application_name, response['applications']) + + def test_exception(self): + with self.assertRaises(ApplicationDoesNotExistException): + self.codedeploy.get_application('some-non-existant-app') diff -Nru python-boto-2.34.0/tests/integration/configservice/__init__.py python-boto-2.38.0/tests/integration/configservice/__init__.py --- python-boto-2.34.0/tests/integration/configservice/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/configservice/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/integration/configservice/test_configservice.py python-boto-2.38.0/tests/integration/configservice/test_configservice.py --- python-boto-2.34.0/tests/integration/configservice/test_configservice.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/configservice/test_configservice.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.configservice.exceptions import NoSuchConfigurationRecorderException
+from tests.compat import unittest
+
+
+class TestConfigService(unittest.TestCase):
+    def setUp(self):
+        self.configservice = boto.connect_configservice()
+
+    def test_describe_configuration_recorders(self):
+        response = self.configservice.describe_configuration_recorders()
+        self.assertIn('ConfigurationRecorders', response)
+
+    def test_handle_no_such_configuration_recorder(self):
+        with self.assertRaises(NoSuchConfigurationRecorderException):
+            self.configservice.describe_configuration_recorders(
+                configuration_recorder_names=['non-existant-recorder'])
+
+    def test_connect_to_non_us_east_1(self):
+        self.configservice = boto.configservice.connect_to_region('us-west-2')
+        response = self.configservice.describe_configuration_recorders()
+        self.assertIn('ConfigurationRecorders', response)
diff -Nru python-boto-2.34.0/tests/integration/dynamodb2/test_highlevel.py python-boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py
--- python-boto-2.34.0/tests/integration/dynamodb2/test_highlevel.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py 2015-04-09 18:57:51.000000000 +0000
@@ -29,10 +29,11 @@
 from tests.unit import unittest
 from boto.dynamodb2 import exceptions
 from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex,
-                                   GlobalKeysOnlyIndex, GlobalIncludeIndex)
+                                   GlobalKeysOnlyIndex, GlobalIncludeIndex,
+                                   GlobalAllIndex)
 from boto.dynamodb2.items import Item
 from boto.dynamodb2.table import Table
-from boto.dynamodb2.types import NUMBER
+from boto.dynamodb2.types import NUMBER, STRING
 
 try:
     import json
@@ -279,8 +280,14 @@
         johndoe = users.get_item(username='johndoe', friend_count=4)
         johndoe.delete()
 
+        # Set batch get limit to ensure keys with no results are
+        # handled correctly.
+        users.max_batch_get = 2
+
         # Test the eventually consistent batch get.
         results = users.batch_get(keys=[
+            {'username': 'noone', 'friend_count': 4},
+            {'username': 'nothere', 'friend_count': 10},
             {'username': 'bob', 'friend_count': 1},
             {'username': 'jane', 'friend_count': 3}
         ])
@@ -639,3 +646,176 @@
             '2013-12-24T15:22:22',
         ]
     )
+
+    def test_query_after_describe_with_gsi(self):
+        # Create a table with a GSI to reproduce the error mentioned in issue
+        # https://github.com/boto/boto/issues/2828
+        users = Table.create('more_gsi_query_users', schema=[
+            HashKey('user_id')
+        ], throughput={
+            'read': 5,
+            'write': 5
+        }, global_indexes=[
+            GlobalAllIndex('EmailGSIIndex', parts=[
+                HashKey('email')
+            ], throughput={
+                'read': 1,
+                'write': 1
+            })
+        ])
+
+        # Add this function to be called after tearDown()
+        self.addCleanup(users.delete)
+
+        # Wait for it.
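+        # (Creating the table and its GSI is asynchronous: the table starts
+        # in CREATING status, and the fixed sleep below is a simple, if
+        # crude, way to give it time to reach ACTIVE before it is used.)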
+        time.sleep(60)
+
+        # Populate a couple of items in it.
+        users.put_item(data={
+            'user_id': '7',
+            'username': 'johndoe',
+            'first_name': 'John',
+            'last_name': 'Doe',
+            'email': 'johndoe@johndoe.com',
+        })
+        users.put_item(data={
+            'user_id': '24',
+            'username': 'alice',
+            'first_name': 'Alice',
+            'last_name': 'Expert',
+            'email': 'alice@alice.com',
+        })
+        users.put_item(data={
+            'user_id': '35',
+            'username': 'jane',
+            'first_name': 'Jane',
+            'last_name': 'Doe',
+            'email': 'jane@jane.com',
+        })
+
+        # Try the GSI. It should work.
+        rs = users.query_2(
+            email__eq='johndoe@johndoe.com',
+            index='EmailGSIIndex'
+        )
+
+        for rs_item in rs:
+            self.assertEqual(rs_item['username'], ['johndoe'])
+
+        # The issue arises when we're introspecting the table and try to
+        # call query_2 after calling the describe method.
+        users_hit_api = Table('more_gsi_query_users')
+        users_hit_api.describe()
+
+        # Try the GSI. This is what goes wrong in issue #2828. It should
+        # work fine now.
+        rs = users_hit_api.query_2(
+            email__eq='johndoe@johndoe.com',
+            index='EmailGSIIndex'
+        )
+
+        for rs_item in rs:
+            self.assertEqual(rs_item['username'], ['johndoe'])
+
+    def test_update_table_online_indexing_support(self):
+        # Create a table with a GSI to test the DynamoDB online indexing
+        # support: https://github.com/boto/boto/pull/2925
+        users = Table.create('online_indexing_support_users', schema=[
+            HashKey('user_id')
+        ], throughput={
+            'read': 5,
+            'write': 5
+        }, global_indexes=[
+            GlobalAllIndex('EmailGSIIndex', parts=[
+                HashKey('email')
+            ], throughput={
+                'read': 2,
+                'write': 2
+            })
+        ])
+
+        # Add this function to be called after tearDown()
+        self.addCleanup(users.delete)
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch fresh table desc from DynamoDB
+        users.describe()
+
+        # Assert if everything is fine so far
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+        # Update a GSI throughput. It should work.
+        users.update_global_secondary_index(global_indexes={
+            'EmailGSIIndex': {
+                'read': 2,
+                'write': 1,
+            }
+        })
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch fresh table desc from DynamoDB
+        users.describe()
+
+        # Assert if everything is fine so far
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 1)
+
+        # Update a GSI throughput the old-fashioned way, for compatibility
+        # purposes. It should work.
+        users.update(global_indexes={
+            'EmailGSIIndex': {
+                'read': 3,
+                'write': 2,
+            }
+        })
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch fresh table desc from DynamoDB
+        users.describe()
+
+        # Assert if everything is fine so far
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 3)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+        # Delete a GSI. It should work.
+        users.delete_global_secondary_index('EmailGSIIndex')
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch fresh table desc from DynamoDB
+        users.describe()
+
+        # Assert if everything is fine so far
+        self.assertEqual(len(users.global_indexes), 0)
+
+        # Create a GSI. It should work.
+        users.create_global_secondary_index(
+            global_index=GlobalAllIndex(
+                'AddressGSIIndex', parts=[
+                    HashKey('address', data_type=STRING)
+                ], throughput={
+                    'read': 1,
+                    'write': 1,
+                })
+        )
+        # Wait for it.
This operation usually takes much longer than the others + time.sleep(60*10) + + # Fetch fresh table desc from DynamoDB + users.describe() + + # Assert if everything is fine so far + self.assertEqual(len(users.global_indexes), 1) + self.assertEqual(users.global_indexes[0].throughput['read'], 1) + self.assertEqual(users.global_indexes[0].throughput['write'], 1) diff -Nru python-boto-2.34.0/tests/integration/ec2/elb/test_connection.py python-boto-2.38.0/tests/integration/ec2/elb/test_connection.py --- python-boto-2.34.0/tests/integration/ec2/elb/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/ec2/elb/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -28,6 +28,7 @@ import time from tests.compat import unittest from boto.ec2.elb import ELBConnection +import boto.ec2.elb class ELBConnectionTest(unittest.TestCase): @@ -286,6 +287,11 @@ [] ) + def test_can_make_sigv4_call(self): + connection = boto.ec2.elb.connect_to_region('eu-central-1') + lbs = connection.get_all_load_balancers() + self.assertTrue(isinstance(lbs, list)) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.34.0/tests/integration/ec2/test_connection.py python-boto-2.38.0/tests/integration/ec2/test_connection.py --- python-boto-2.34.0/tests/integration/ec2/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/ec2/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -33,6 +33,7 @@ from nose.plugins.attrib import attr from boto.ec2.connection import EC2Connection from boto.exception import EC2ResponseError +import boto.ec2 class EC2ConnectionTest(unittest.TestCase): @@ -239,3 +240,7 @@ # And kill it. rs.instances[0].terminate() + + def test_can_get_all_instances_sigv4(self): + connection = boto.ec2.connect_to_region('eu-central-1') + self.assertTrue(isinstance(connection.get_all_instances(), list)) diff -Nru python-boto-2.34.0/tests/integration/ec2containerservice/__init__.py python-boto-2.38.0/tests/integration/ec2containerservice/__init__.py --- python-boto-2.34.0/tests/integration/ec2containerservice/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/ec2containerservice/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# diff -Nru python-boto-2.34.0/tests/integration/ec2containerservice/test_ec2containerservice.py python-boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py --- python-boto-2.34.0/tests/integration/ec2containerservice/test_ec2containerservice.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.ec2containerservice.exceptions import ClientException +from tests.compat import unittest + + +class TestEC2ContainerService(unittest.TestCase): + def setUp(self): + self.ecs = boto.connect_ec2containerservice() + + def test_list_clusters(self): + response = self.ecs.list_clusters() + self.assertIn('clusterArns', + response['ListClustersResponse']['ListClustersResult']) + + def test_handle_not_found_exception(self): + with self.assertRaises(ClientException): + # Try to stop a task with an invalid arn. + self.ecs.stop_task(task='foo') diff -Nru python-boto-2.34.0/tests/integration/iam/test_connection.py python-boto-2.38.0/tests/integration/iam/test_connection.py --- python-boto-2.34.0/tests/integration/iam/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/iam/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -27,6 +27,8 @@ class TestIAM(unittest.TestCase): + iam = True + def test_group_users(self): # A very basic test to create a group, a user, add the user # to the group and then delete everything diff -Nru python-boto-2.34.0/tests/integration/iam/test_password_policy.py python-boto-2.38.0/tests/integration/iam/test_password_policy.py --- python-boto-2.34.0/tests/integration/iam/test_password_policy.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/iam/test_password_policy.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright (c) 2014 Rocket Internet AG. +# Luca Bruno +# All rights reserved. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import time
+
+from tests.compat import unittest
+
+class IAMAccountPasswordPolicy(unittest.TestCase):
+    iam = True
+
+    def test_password_policy(self):
+        # A series of tests to check the password policy API
+        iam = boto.connect_iam()
+
+        # First preserve the current password policy
+        try:
+            initial_policy = iam.get_account_password_policy()
+        except boto.exception.BotoServerError as srv_error:
+            initial_policy = None
+            if srv_error.status != 404:
+                raise srv_error
+
+        # Update the policy and read it back
+        test_min_length = 88
+        iam.update_account_password_policy(minimum_password_length=test_min_length)
+        new_policy = iam.get_account_password_policy()
+        new_min_length = new_policy['get_account_password_policy_response']\
+            ['get_account_password_policy_result']['password_policy']\
+            ['minimum_password_length']
+
+        if test_min_length != int(new_min_length):
+            raise Exception("Failed to update account password policy")
+
+        # Delete the policy and check the correct deletion
+        test_policy = ''
+        iam.delete_account_password_policy()
+        try:
+            test_policy = iam.get_account_password_policy()
+        except boto.exception.BotoServerError as srv_error:
+            test_policy = None
+            if srv_error.status != 404:
+                raise srv_error
+
+        if test_policy is not None:
+            raise Exception("Failed to delete account password policy")
+
+        # Restore initial account password policy
+        if initial_policy:
+            p = initial_policy['get_account_password_policy_response']\
+                ['get_account_password_policy_result']['password_policy']
+            iam.update_account_password_policy(minimum_password_length=int(p['minimum_password_length']),
+                                               allow_users_to_change_password=bool(p['allow_users_to_change_password']),
+                                               hard_expiry=bool(p['hard_expiry']),
+                                               max_password_age=int(p['max_password_age']),
+                                               password_reuse_prevention=int(p['password_reuse_prevention']),
+                                               require_lowercase_characters=bool(p['require_lowercase_characters']),
+                                               require_numbers=bool(p['require_numbers']),
+                                               require_symbols=bool(p['require_symbols']),
+                                               require_uppercase_characters=bool(p['require_uppercase_characters']))
diff -Nru python-boto-2.34.0/tests/integration/kinesis/test_kinesis.py python-boto-2.38.0/tests/integration/kinesis/test_kinesis.py
--- python-boto-2.34.0/tests/integration/kinesis/test_kinesis.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/integration/kinesis/test_kinesis.py 2015-04-09 18:57:51.000000000 +0000
@@ -55,31 +55,57 @@
         else:
             raise TimeoutError('Stream is still
not active, aborting...') + # Make a tag. + kinesis.add_tags_to_stream(stream_name='test', tags={'foo': 'bar'}) + + # Check that the correct tag is there. + response = kinesis.list_tags_for_stream(stream_name='test') + self.assertEqual(len(response['Tags']), 1) + self.assertEqual(response['Tags'][0], + {'Key':'foo', 'Value': 'bar'}) + + # Remove the tag and ensure it is removed. + kinesis.remove_tags_from_stream(stream_name='test', tag_keys=['foo']) + response = kinesis.list_tags_for_stream(stream_name='test') + self.assertEqual(len(response['Tags']), 0) + # Get ready to process some data from the stream response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON') shard_iterator = response['ShardIterator'] # Write some data to the stream data = 'Some data ...' + record = { + 'Data': data, + 'PartitionKey': data, + } response = kinesis.put_record('test', data, data) + response = kinesis.put_records([record, record.copy()], 'test') # Wait for the data to show up tries = 0 + num_collected = 0 + num_expected_records = 3 + collected_records = [] while tries < 100: tries += 1 time.sleep(1) response = kinesis.get_records(shard_iterator) shard_iterator = response['NextShardIterator'] - - if len(response['Records']): + for record in response['Records']: + if 'Data' in record: + collected_records.append(record['Data']) + num_collected += 1 + if num_collected >= num_expected_records: + self.assertEqual(num_expected_records, num_collected) break else: raise TimeoutError('No records found, aborting...') # Read the data, which should be the same as what we wrote - self.assertEqual(1, len(response['Records'])) - self.assertEqual(data, response['Records'][0]['Data']) + for record in collected_records: + self.assertEqual(data, record) def test_describe_non_existent_stream(self): with self.assertRaises(ResourceNotFoundException) as cm: diff -Nru python-boto-2.34.0/tests/integration/kms/test_kms.py python-boto-2.38.0/tests/integration/kms/test_kms.py --- python-boto-2.34.0/tests/integration/kms/test_kms.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/integration/kms/test_kms.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+
+import boto
+from boto.kms.exceptions import NotFoundException
+from tests.compat import unittest
+
+
+class TestKMS(unittest.TestCase):
+    def setUp(self):
+        self.kms = boto.connect_kms()
+
+    def test_list_keys(self):
+        response = self.kms.list_keys()
+        self.assertIn('Keys', response)
+
+    def test_handle_not_found_exception(self):
+        with self.assertRaises(NotFoundException):
+            # Describe a key that does not exist.
+            self.kms.describe_key(
+                key_id='nonexistant_key',
+            )
diff -Nru python-boto-2.34.0/tests/integration/route53/test_zone.py python-boto-2.38.0/tests/integration/route53/test_zone.py
--- python-boto-2.34.0/tests/integration/route53/test_zone.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/integration/route53/test_zone.py 2015-04-09 18:57:51.000000000 +0000
@@ -27,6 +27,7 @@
 from nose.plugins.attrib import attr
 from boto.route53.connection import Route53Connection
 from boto.exception import TooManyRecordsException
+from boto.vpc import VPCConnection
 
 
 @attr(route53=True)
@@ -151,7 +152,9 @@
                         identifier=('baz', 'us-east-1'))
         self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5',
                         identifier=('bam', 'us-west-1'))
-        self.assertRaises(TooManyRecordsException, lambda: self.zone.get_a('exception.%s' % self.base_domain))
+        self.assertRaises(TooManyRecordsException,
+                          lambda: self.zone.get_a('exception.%s' %
+                                                  self.base_domain))
         self.zone.delete_a('exception.%s' % self.base_domain, all=True)
 
     @classmethod
@@ -161,5 +164,33 @@
         self.zone.delete_mx(self.base_domain)
         self.zone.delete()
 
+
+@attr(route53=True)
+class TestRoute53PrivateZone(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        time_str = str(int(time.time()))
+        self.route53 = Route53Connection()
+        self.base_domain = 'boto-private-zone-test-%s.com' % time_str
+        self.vpc = VPCConnection()
+        self.test_vpc = self.vpc.create_vpc(cidr_block='10.11.0.0/16')
+        # tag the vpc to make it easily identifiable if things go spang
+        self.test_vpc.add_tag("Name", self.base_domain)
+        self.zone = self.route53.get_zone(self.base_domain)
+        if self.zone is not None:
+            self.zone.delete()
+
+    def test_create_private_zone(self):
+        self.zone = self.route53.create_hosted_zone(self.base_domain,
+                                                    private_zone=True,
+                                                    vpc_id=self.test_vpc.id,
+                                                    vpc_region='us-east-1')
+
+    @classmethod
+    def tearDownClass(self):
+        if self.zone is not None:
+            self.zone.delete()
+        self.test_vpc.delete()
+
 if __name__ == '__main__':
     unittest.main(verbosity=3)
diff -Nru python-boto-2.34.0/tests/integration/s3/test_connect_to_region.py python-boto-2.38.0/tests/integration/s3/test_connect_to_region.py
--- python-boto-2.34.0/tests/integration/s3/test_connect_to_region.py 1970-01-01 00:00:00.000000000 +0000
+++ python-boto-2.38.0/tests/integration/s3/test_connect_to_region.py 2015-04-09 18:57:51.000000000 +0000
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014 Steven Richards
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Unit test for passing in 'host' parameter and overriding the region
+See issue: #2522
+"""
+from tests.compat import unittest
+
+from boto.s3.connection import S3Connection
+from boto.s3 import connect_to_region
+
+class S3SpecifyHost(unittest.TestCase):
+    s3 = True
+
+    def testWithNonAWSHost(self):
+        connect_args = dict({'host':'www.not-a-website.com'})
+        connection = connect_to_region('us-east-1', **connect_args)
+        self.assertEquals('www.not-a-website.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testSuccessWithHostOverrideRegion(self):
+        connect_args = dict({'host':'s3.amazonaws.com'})
+        connection = connect_to_region('us-west-2', **connect_args)
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testSuccessWithDefaultUSWest2(self):
+        connection = connect_to_region('us-west-2')
+        self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testSuccessWithDefaultUSEast1(self):
+        connection = connect_to_region('us-east-1')
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testDefaultWithInvalidHost(self):
+        connect_args = dict({'host':''})
+        connection = connect_to_region('us-west-2', **connect_args)
+        self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testDefaultWithInvalidHostNone(self):
+        connect_args = dict({'host':None})
+        connection = connect_to_region('us-east-1', **connect_args)
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
diff -Nru python-boto-2.34.0/tests/integration/s3/test_key.py python-boto-2.38.0/tests/integration/s3/test_key.py
--- python-boto-2.34.0/tests/integration/s3/test_key.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/integration/s3/test_key.py 2015-04-09 18:57:51.000000000 +0000
@@ -27,8 +27,9 @@
 from tests.unit import unittest
 import time
 
-from boto.compat import six, StringIO, urllib
+import boto.s3
+
+from boto.compat import six, StringIO, urllib
 from boto.s3.connection import S3Connection
 from boto.s3.key import Key
 from boto.exception import S3ResponseError
@@ -459,3 +460,75 @@
         kn = self.bucket.new_key("testkey_for_sse_c")
         ks = kn.get_contents_as_string(headers=header)
         self.assertEqual(ks, content.encode('utf-8'))
+
+
+class S3KeySigV4Test(unittest.TestCase):
+    def setUp(self):
+        self.conn = boto.s3.connect_to_region('eu-central-1')
+        self.bucket_name = 'boto-sigv4-key-%d' % int(time.time())
+        self.bucket = self.conn.create_bucket(self.bucket_name,
+                                              location='eu-central-1')
+
+    def tearDown(self):
+        for key in self.bucket:
+            key.delete()
+        self.bucket.delete()
+
+    def test_put_get_with_non_string_headers_key(self):
+        k = Key(self.bucket)
+        k.key = 'foobar'
+        body = 'This is a test of S3'
+        # A content-length header will be added to this request since it
has a body. + k.set_contents_from_string(body) + # Set a header that has an integer. This checks for a bug where + # the sigv4 signer assumes that all of the headers are strings. + headers = {'Content-Length': 0} + from_s3_key = self.bucket.get_key('foobar', headers=headers) + self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'), + body) + + +class S3KeyVersionCopyTest(unittest.TestCase): + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'boto-key-version-copy-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + self.bucket.configure_versioning(True) + + def tearDown(self): + for key in self.bucket.list_versions(): + key.delete() + self.bucket.delete() + + def test_key_overwrite_and_copy(self): + first_content = "abcdefghijklm" + second_content = "nopqrstuvwxyz" + k = Key(self.bucket, 'testkey') + k.set_contents_from_string(first_content) + # Wait for S3's eventual consistency (may not be necessary) + while self.bucket.get_key('testkey') is None: + time.sleep(5) + # Get the first version_id + first_key = self.bucket.get_key('testkey') + first_version_id = first_key.version_id + # Overwrite the key + k = Key(self.bucket, 'testkey') + k.set_contents_from_string(second_content) + # Wait for eventual consistency + while True: + second_key = self.bucket.get_key('testkey') + if second_key is None or second_key.version_id == first_version_id: + time.sleep(5) + else: + break + # Copy first key (no longer the current version) to a new key + source_key = self.bucket.get_key('testkey', + version_id=first_version_id) + source_key.copy(self.bucket, 'copiedkey') + while self.bucket.get_key('copiedkey') is None: + time.sleep(5) + copied_key = self.bucket.get_key('copiedkey') + copied_key_contents = copied_key.get_contents_as_string() + self.assertEqual(first_content, copied_key_contents) + diff -Nru python-boto-2.34.0/tests/integration/sqs/test_connection.py python-boto-2.38.0/tests/integration/sqs/test_connection.py --- python-boto-2.34.0/tests/integration/sqs/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/sqs/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -242,15 +242,11 @@ def test_get_messages_attributes(self): conn = SQSConnection() current_timestamp = int(time.time()) - queue_name = 'test%d' % int(time.time()) - test = conn.create_queue(queue_name) - self.addCleanup(conn.delete_queue, test) + test = self.create_temp_queue(conn) time.sleep(65) # Put a message in the queue. - m1 = Message() - m1.set_body('This is a test message.') - test.write(m1) + self.put_queue_message(test) self.assertEqual(test.count(), 1) # Check all attributes. @@ -265,9 +261,7 @@ self.assertTrue(first_rec >= current_timestamp) # Put another message in the queue. - m2 = Message() - m2.set_body('This is another test message.') - test.write(m2) + self.put_queue_message(test) self.assertEqual(test.count(), 1) # Check a specific attribute. @@ -279,3 +273,32 @@ self.assertEqual(msg.attributes['ApproximateReceiveCount'], '1') with self.assertRaises(KeyError): msg.attributes['ApproximateFirstReceiveTimestamp'] + + def test_queue_purge(self): + conn = SQSConnection() + test = self.create_temp_queue(conn) + time.sleep(65) + + # Put some messages in the queue. 
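+        # (Write several messages so a successful purge has to remove more
+        # than a single message.)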
+ for x in range(0, 4): + self.put_queue_message(test) + self.assertEqual(test.count(), 4) + + # Now purge the queue + conn.purge_queue(test) + + # Now assert queue count is 0 + self.assertEqual(test.count(), 0) + + def create_temp_queue(self, conn): + current_timestamp = int(time.time()) + queue_name = 'test%d' % int(time.time()) + test = conn.create_queue(queue_name) + self.addCleanup(conn.delete_queue, test) + + return test + + def put_queue_message(self, queue): + m1 = Message() + m1.set_body('This is a test message.') + queue.write(m1) diff -Nru python-boto-2.34.0/tests/integration/sts/test_session_token.py python-boto-2.38.0/tests/integration/sts/test_session_token.py --- python-boto-2.34.0/tests/integration/sts/test_session_token.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/integration/sts/test_session_token.py 2015-04-09 18:57:51.000000000 +0000 @@ -88,4 +88,4 @@ creds = c.decode_authorization_message('b94d27b9934') except BotoServerError as err: self.assertEqual(err.status, 400) - self.assertTrue('Invalid token' in err.body) + self.assertIn('InvalidAuthorizationMessageException', err.body) diff -Nru python-boto-2.34.0/tests/unit/auth/test_query.py python-boto-2.38.0/tests/unit/auth/test_query.py --- python-boto-2.34.0/tests/unit/auth/test_query.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/auth/test_query.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -import copy -from mock import Mock -from tests.unit import unittest - -from boto.auth import QueryAuthHandler -from boto.connection import HTTPRequest - - -class TestQueryAuthHandler(unittest.TestCase): - def setUp(self): - self.provider = Mock() - self.provider.access_key = 'access_key' - self.provider.secret_key = 'secret_key' - self.request = HTTPRequest( - method='GET', - protocol='https', - host='sts.amazonaws.com', - port=443, - path='/', - auth_path=None, - params={ - 'Action': 'AssumeRoleWithWebIdentity', - 'Version': '2011-06-15', - 'RoleSessionName': 'web-identity-federation', - 'ProviderId': '2012-06-01', - 'WebIdentityToken': 'Atza|IQEBLjAsAhRkcxQ', - }, - headers={}, - body='' - ) - - def test_escape_value(self): - auth = QueryAuthHandler('sts.amazonaws.com', - Mock(), self.provider) - # This should **NOT** get escaped. 
- value = auth._escape_value('Atza|IQEBLjAsAhRkcxQ') - self.assertEqual(value, 'Atza|IQEBLjAsAhRkcxQ') - - def test_build_query_string(self): - auth = QueryAuthHandler('sts.amazonaws.com', - Mock(), self.provider) - query_string = auth._build_query_string(self.request.params) - self.assertEqual(query_string, 'Action=AssumeRoleWithWebIdentity' + \ - '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ - '&Version=2011-06-15&WebIdentityToken=Atza|IQEBLjAsAhRkcxQ') - - def test_add_auth(self): - auth = QueryAuthHandler('sts.amazonaws.com', - Mock(), self.provider) - req = copy.copy(self.request) - auth.add_auth(req) - self.assertEqual(req.path, - '/?Action=AssumeRoleWithWebIdentity' + \ - '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ - '&Version=2011-06-15&WebIdentityToken=Atza|IQEBLjAsAhRkcxQ') diff -Nru python-boto-2.34.0/tests/unit/auth/test_sigv4.py python-boto-2.38.0/tests/unit/auth/test_sigv4.py --- python-boto-2.34.0/tests/unit/auth/test_sigv4.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/auth/test_sigv4.py 2015-04-09 18:57:51.000000000 +0000 @@ -483,6 +483,19 @@ authed_req = self.auth.canonical_request(request) self.assertEqual(authed_req, expected) + def test_non_string_headers(self): + self.awesome_bucket_request.headers['Content-Length'] = 8 + canonical_headers = self.auth.canonical_headers( + self.awesome_bucket_request.headers) + self.assertEqual( + canonical_headers, + 'content-length:8\n' + 'user-agent:Boto\n' + 'x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae' + '41e4649b934ca495991b7852b855\n' + 'x-amz-date:20130605T193245Z' + ) + class FakeS3Connection(object): def __init__(self, *args, **kwargs): @@ -518,8 +531,10 @@ def test_sigv4_non_optional(self): # Requires SigV4. - fake = FakeS3Connection(host='s3.cn-north-1.amazonaws.com.cn') - self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + for region in ['.cn-north', '.eu-central', '-eu-central']: + fake = FakeS3Connection(host='s3' + region + '-1.amazonaws.com') + self.assertEqual( + fake._required_auth_capability(), ['hmac-v4-s3']) def test_sigv4_opt_in_config(self): # Opt-in via the config. diff -Nru python-boto-2.34.0/tests/unit/auth/test_stsanon.py python-boto-2.38.0/tests/unit/auth/test_stsanon.py --- python-boto-2.34.0/tests/unit/auth/test_stsanon.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/auth/test_stsanon.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import copy +from mock import Mock +from tests.unit import unittest + +from boto.auth import STSAnonHandler +from boto.connection import HTTPRequest + + +class TestSTSAnonHandler(unittest.TestCase): + def setUp(self): + self.provider = Mock() + self.provider.access_key = 'access_key' + self.provider.secret_key = 'secret_key' + self.request = HTTPRequest( + method='GET', + protocol='https', + host='sts.amazonaws.com', + port=443, + path='/', + auth_path=None, + params={ + 'Action': 'AssumeRoleWithWebIdentity', + 'Version': '2011-06-15', + 'RoleSessionName': 'web-identity-federation', + 'ProviderId': '2012-06-01', + 'WebIdentityToken': 'Atza|IQEBLjAsAhRkcxQ', + }, + headers={}, + body='' + ) + + def test_escape_value(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + # This is changed from a previous version because this string is + # being passed to the query string and query strings must + # be url encoded. + value = auth._escape_value('Atza|IQEBLjAsAhRkcxQ') + self.assertEqual(value, 'Atza%7CIQEBLjAsAhRkcxQ') + + def test_build_query_string(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + query_string = auth._build_query_string(self.request.params) + self.assertEqual(query_string, 'Action=AssumeRoleWithWebIdentity' + \ + '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ + '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ') + + def test_add_auth(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + req = copy.copy(self.request) + auth.add_auth(req) + self.assertEqual(req.body, + 'Action=AssumeRoleWithWebIdentity' + \ + '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ + '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ') diff -Nru python-boto-2.34.0/tests/unit/awslambda/__init__.py python-boto-2.38.0/tests/unit/awslambda/__init__.py --- python-boto-2.34.0/tests/unit/awslambda/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/awslambda/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
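The STSAnonHandler tests above pin down both behavioural changes relative to the deleted QueryAuthHandler: parameter values are now percent-encoded, and the encoded query string travels in a form-encoded POST body instead of being appended to the URI. The escaping itself is plain urllib quoting; for example, in Python 3:

from urllib.parse import quote

# The pipe in a web identity token no longer passes through untouched.
assert quote('Atza|IQEBLjAsAhRkcxQ') == 'Atza%7CIQEBLjAsAhRkcxQ'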
+# diff -Nru python-boto-2.34.0/tests/unit/awslambda/test_awslambda.py python-boto-2.38.0/tests/unit/awslambda/test_awslambda.py --- python-boto-2.34.0/tests/unit/awslambda/test_awslambda.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/awslambda/test_awslambda.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,117 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import tempfile +import shutil +import os +import socket + +from boto.compat import json +from boto.awslambda.layer1 import AWSLambdaConnection +from tests.unit import AWSMockServiceTestCase +from tests.compat import mock + + +class TestAWSLambda(AWSMockServiceTestCase): + connection_class = AWSLambdaConnection + + def default_body(self): + return b'{}' + + def test_upload_function_binary(self): + self.set_http_response(status_code=201) + function_data = b'This is my file' + self.service_connection.upload_function( + function_name='my-function', + function_zip=function_data, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) + self.assertEqual(self.actual_request.body, function_data) + self.assertEqual( + self.actual_request.headers['Content-Length'], + str(len(function_data)) + ) + self.assertEqual( + self.actual_request.path, + '/2014-11-13/functions/my-function?Handler=myhandler&Mode' + '=event&Role=myrole&Runtime=nodejs' + ) + + def test_upload_function_file(self): + self.set_http_response(status_code=201) + rootdir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, rootdir) + + filename = 'test_file' + function_data = b'This is my file' + full_path = os.path.join(rootdir, filename) + + with open(full_path, 'wb') as f: + f.write(function_data) + + with open(full_path, 'rb') as f: + self.service_connection.upload_function( + function_name='my-function', + function_zip=f, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) + self.assertEqual(self.actual_request.body.read(), + function_data) + self.assertEqual( + self.actual_request.headers['Content-Length'], + str(len(function_data)) + ) + self.assertEqual( + self.actual_request.path, + '/2014-11-13/functions/my-function?Handler=myhandler&Mode' + '=event&Role=myrole&Runtime=nodejs' + ) + + def test_upload_function_unseekable_file_no_tell(self): + sock = socket.socket() + with self.assertRaises(TypeError): + self.service_connection.upload_function( + function_name='my-function', + function_zip=sock, + role='myrole', + 
handler='myhandler', + mode='event', + runtime='nodejs' + ) + + def test_upload_function_unseekable_file_cannot_tell(self): + mock_file = mock.Mock() + mock_file.tell.side_effect = IOError + with self.assertRaises(TypeError): + self.service_connection.upload_function( + function_name='my-function', + function_zip=mock_file, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) diff -Nru python-boto-2.34.0/tests/unit/cloudformation/test_connection.py python-boto-2.38.0/tests/unit/cloudformation/test_connection.py --- python-boto-2.34.0/tests/unit/cloudformation/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/cloudformation/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -134,11 +134,14 @@ api_response = self.service_connection.update_stack( 'stack_name', template_url='http://url', template_body=SAMPLE_TEMPLATE, - parameters=[('KeyName', 'myKeyName')], + parameters=[('KeyName', 'myKeyName'), ('KeyName2', "", True), + ('KeyName3', "", False), ('KeyName4', None, True), + ('KeyName5', "Ignore Me", True)], tags={'TagKey': 'TagValue'}, notification_arns=['arn:notify1', 'arn:notify2'], disable_rollback=True, - timeout_in_minutes=20 + timeout_in_minutes=20, + use_previous_template=True ) self.assert_request_parameters({ 'Action': 'UpdateStack', @@ -148,6 +151,14 @@ 'NotificationARNs.member.2': 'arn:notify2', 'Parameters.member.1.ParameterKey': 'KeyName', 'Parameters.member.1.ParameterValue': 'myKeyName', + 'Parameters.member.2.ParameterKey': 'KeyName2', + 'Parameters.member.2.UsePreviousValue': 'true', + 'Parameters.member.3.ParameterKey': 'KeyName3', + 'Parameters.member.3.ParameterValue': '', + 'Parameters.member.4.UsePreviousValue': 'true', + 'Parameters.member.4.ParameterKey': 'KeyName4', + 'Parameters.member.5.UsePreviousValue': 'true', + 'Parameters.member.5.ParameterKey': 'KeyName5', 'Tags.member.1.Key': 'TagKey', 'Tags.member.1.Value': 'TagValue', 'StackName': 'stack_name', @@ -155,6 +166,7 @@ 'TimeoutInMinutes': 20, 'TemplateBody': SAMPLE_TEMPLATE, 'TemplateURL': 'http://url', + 'UsePreviousTemplate': 'true', }) def test_update_stack_with_minimum_args(self): @@ -698,7 +710,7 @@ self.set_http_response(status_code=200) api_response = self.service_connection.set_stack_policy('stack-id', stack_policy_body='{}') - self.assertEqual(api_response['Some'], 'content') + self.assertDictEqual(api_response, {'SetStackPolicyResult': {'Some': 'content'}}) self.assert_request_parameters({ 'Action': 'SetStackPolicy', 'ContentType': 'JSON', diff -Nru python-boto-2.34.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py python-boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py --- python-boto-2.34.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,127 @@ +#!/usr/bin/env python +import json +import mock +from tests.unit import AWSMockServiceTestCase +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + + +class CloudSearchDomainConnectionTest(AWSMockServiceTestCase): + connection_class = CloudSearchDomainConnection + + domain_status = """{ + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": false, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint":
"search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + }""" + + def create_service_connection(self, **kwargs): + if kwargs.get('host', None) is None: + kwargs['host'] = 'search-demo.us-east-1.cloudsearch.amazonaws.com' + return super(CloudSearchDomainConnectionTest, self).\ + create_service_connection(**kwargs) + + def test_get_search_service(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + search_service = domain.get_search_service() + + self.assertEqual(search_service.sign_request, True) + + def test_get_document_service(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + document_service = domain.get_document_service() + + self.assertEqual(document_service.sign_request, True) + + def test_search_with_auth(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + search_service = domain.get_search_service() + + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': { + 'id': '12341', + 'fields': { + 'title': 'Document 1', + 'rank': 1 + } + } + }, + 'status': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + self.set_http_response(status_code=200, body=json.dumps(response)) + search_service.domain_connection = self.service_connection + resp = search_service.search() + + headers = self.actual_request.headers + + self.assertIsNotNone(headers.get('Authorization')) + + def test_upload_documents_with_auth(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + document_service = domain.get_document_service() + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + document = { + "id": "1234", + "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"] + } + + self.set_http_response(status_code=200, body=json.dumps(response)) + document_service.domain_connection = self.service_connection + document_service.add("1234", document) + resp = document_service.commit() + + headers = self.actual_request.headers + + self.assertIsNotNone(headers.get('Authorization')) + + def test_no_host_provided(self): + # A host must be provided or a error is thrown. 
+ with self.assertRaises(ValueError): + CloudSearchDomainConnection( + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key' + ) diff -Nru python-boto-2.34.0/tests/unit/cloudtrail/test_layer1.py python-boto-2.38.0/tests/unit/cloudtrail/test_layer1.py --- python-boto-2.34.0/tests/unit/cloudtrail/test_layer1.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/cloudtrail/test_layer1.py 2015-04-09 18:57:51.000000000 +0000 @@ -25,7 +25,7 @@ def test_describe(self): self.set_http_response(status_code=200) api_response = self.service_connection.describe_trails() - + self.assertEqual(1, len(api_response['trailList'])) self.assertEqual('test', api_response['trailList'][0]['Name']) @@ -38,7 +38,7 @@ self.set_http_response(status_code=200) api_response = self.service_connection.describe_trails( trail_name_list=['test']) - + self.assertEqual(1, len(api_response['trailList'])) self.assertEqual('test', api_response['trailList'][0]['Name']) @@ -67,13 +67,15 @@ def test_create(self): self.set_http_response(status_code=200) - trail = {'Name': 'test', 'S3BucketName': 'cloudtrail-1', - 'SnsTopicName': 'cloudtrail-1', - 'IncludeGlobalServiceEvents': False} - - api_response = self.service_connection.create_trail(trail=trail) - - self.assertEqual(trail, api_response['trail']) + api_response = self.service_connection.create_trail( + 'test', 'cloudtrail-1', sns_topic_name='cloudtrail-1', + include_global_service_events=False) + + self.assertEqual('test', api_response['trail']['Name']) + self.assertEqual('cloudtrail-1', api_response['trail']['S3BucketName']) + self.assertEqual('cloudtrail-1', api_response['trail']['SnsTopicName']) + self.assertEqual(False, + api_response['trail']['IncludeGlobalServiceEvents']) target = self.actual_request.headers['X-Amz-Target'] self.assertTrue('CreateTrail' in target) diff -Nru python-boto-2.34.0/tests/unit/dynamodb/test_types.py python-boto-2.38.0/tests/unit/dynamodb/test_types.py --- python-boto-2.34.0/tests/unit/dynamodb/test_types.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/dynamodb/test_types.py 2015-04-09 18:57:51.000000000 +0000 @@ -45,6 +45,12 @@ {'B': 'AQ=='}) self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])), {'BS': ['AQ==']}) + self.assertEqual(dynamizer.encode(['foo', 54, [1]]), + {'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}) + self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}), + {'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}) + self.assertEqual(dynamizer.encode(None), {'NULL': True}) + self.assertEqual(dynamizer.encode(False), {'BOOL': False}) def test_decoding_to_dynamodb(self): dynamizer = types.Dynamizer() @@ -58,6 +64,12 @@ self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01')) self.assertEqual(dynamizer.decode({'BS': ['AQ==']}), set([types.Binary(b'\x01')])) + self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}), + ['foo', 54, [1]]) + self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}), + {'foo': 'bar', 'hoge': {'sub': 1}}) + self.assertEqual(dynamizer.decode({'NULL': True}), None) + self.assertEqual(dynamizer.decode({'BOOL': False}), False) def test_float_conversion_errors(self): dynamizer = types.Dynamizer() @@ -68,6 +80,10 @@ with self.assertRaises(DynamoDBNumberError): dynamizer.encode(1.1) + def test_non_boolean_conversions(self): + dynamizer = types.NonBooleanDynamizer() + 
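The Dynamizer assertions above cover the native DynamoDB document types: lists, maps, null and boolean now round-trip as L, M, NULL and BOOL, while the NonBooleanDynamizer tested next keeps the legacy numeric coercion of booleans for backwards compatibility. In short:

from boto.dynamodb.types import Dynamizer

d = Dynamizer()
assert d.encode(['foo', 54]) == {'L': [{'S': 'foo'}, {'N': '54'}]}
assert d.encode({'sub': 1}) == {'M': {'sub': {'N': '1'}}}
assert d.encode(None) == {'NULL': True}
assert d.decode({'BOOL': False}) is False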
self.assertEqual(dynamizer.encode(True), {'N': '1'}) + def test_lossy_float_conversions(self): dynamizer = types.LossyFloatDynamizer() # Just testing the differences here, specifically float conversions: diff -Nru python-boto-2.34.0/tests/unit/dynamodb2/test_table.py python-boto-2.38.0/tests/unit/dynamodb2/test_table.py --- python-boto-2.34.0/tests/unit/dynamodb2/test_table.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/dynamodb2/test_table.py 2015-04-09 18:57:51.000000000 +0000 @@ -823,6 +823,12 @@ self.assertFalse(self.create_item({})) +class ItemFromItemTestCase(ItemTestCase): + def setUp(self): + super(ItemFromItemTestCase, self).setUp() + self.johndoe = self.create_item(self.johndoe) + + def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None): if exclusive_start_key is None: exclusive_start_key = -1 @@ -1124,6 +1130,13 @@ # Make sure we won't check for results in the future. self.assertFalse(self.results._results_left) + def test_fetch_more_empty(self): + self.results.to_call(lambda keys: {'results': [], 'last_key': None}) + + self.results.fetch_more() + self.assertEqual(self.results._results, []) + self.assertRaises(StopIteration, self.results.next) + def test_iteration(self): # First page. self.assertEqual(next(self.results), 'hello alice') @@ -1580,6 +1593,143 @@ } }) + def test_create_global_secondary_index(self): + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.create_global_secondary_index( + global_index=GlobalAllIndex( + 'JustCreatedIndex', + parts=[ + HashKey('requiredHashKey') + ], + throughput={ + 'read': 2, + 'write': 2 + } + ) + ) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Create': { + 'IndexName': 'JustCreatedIndex', + 'KeySchema': [ + { + 'KeyType': 'HASH', + 'AttributeName': 'requiredHashKey' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + }, + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 2 + } + } + } + ], + attribute_definitions=[ + { + 'AttributeName': 'requiredHashKey', + 'AttributeType': 'S' + } + ] + ) + + def test_delete_global_secondary_index(self): + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.delete_global_secondary_index('RandomGSIIndex') + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Delete': { + 'IndexName': 'RandomGSIIndex', + } + } + ] + ) + + def test_update_global_secondary_index(self): + # Updating a single global secondary index + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.update_global_secondary_index(global_indexes={ + 'A_IndexToBeUpdated': { + 'read': 5, + 'write': 5 + } + }) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Update': { + 'IndexName': 'A_IndexToBeUpdated', + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + }, + } + } + ] + ) + + # Updating multiple global secondary indexes + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.update_global_secondary_index(global_indexes={ + 'A_IndexToBeUpdated': { + 'read': 5, + 'write': 5 + }, + 'B_IndexToBeUpdated': { + 'read': 9, + 'write': 9 + } + }) + + args, kwargs = mock_update.call_args + self.assertEqual(args, ('users',)) + update = 
kwargs['global_secondary_index_updates'][:] + update.sort(key=lambda x: x['Update']['IndexName']) + self.assertDictEqual( + update[0], + { + 'Update': { + 'IndexName': 'A_IndexToBeUpdated', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 5, + 'ReadCapacityUnits': 5 + } + } + }) + self.assertDictEqual( + update[1], + { + 'Update': { + 'IndexName': 'B_IndexToBeUpdated', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 9, + 'ReadCapacityUnits': 9 + } + } + }) + def test_delete(self): with mock.patch.object( self.users.connection, @@ -2663,6 +2813,33 @@ self.assertIn('limit', mock_query.call_args[1]) self.assertEqual(10, mock_query.call_args[1]['limit']) + def test_query_count_paginated(self): + def return_side_effect(*args, **kwargs): + if kwargs.get('exclusive_start_key'): + return {'Count': 10, 'LastEvaluatedKey': None} + else: + return { + 'Count': 20, + 'LastEvaluatedKey': { + 'username': { + 'S': 'johndoe' + }, + 'date_joined': { + 'N': '4118642633' + } + } + } + + with mock.patch.object( + self.users.connection, + 'query', + side_effect=return_side_effect + ) as mock_query: + count = self.users.query_count(username__eq='johndoe') + self.assertTrue(isinstance(count, int)) + self.assertEqual(30, count) + self.assertEqual(mock_query.call_count, 2) + def test_private_batch_get(self): expected = { "ConsumedCapacity": { diff -Nru python-boto-2.34.0/tests/unit/ec2/autoscale/test_group.py python-boto-2.38.0/tests/unit/ec2/autoscale/test_group.py --- python-boto-2.34.0/tests/unit/ec2/autoscale/test_group.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/ec2/autoscale/test_group.py 2015-04-09 18:57:51.000000000 +0000 @@ -294,6 +294,10 @@ true false + vpc-12345 + + sg-1234 + @@ -320,6 +324,9 @@ self.assertEqual(response[0].instance_monitoring.enabled, 'true') self.assertEqual(response[0].ebs_optimized, False) self.assertEqual(response[0].block_device_mappings, []) + self.assertEqual(response[0].classic_link_vpc_id, 'vpc-12345') + self.assertEqual(response[0].classic_link_vpc_security_groups, + ['sg-1234']) self.assert_request_parameters({ 'Action': 'DescribeLaunchConfigurations', @@ -367,7 +374,9 @@ associate_public_ip_address=True, volume_type='atype', delete_on_termination=False, - iops=3000 + iops=3000, + classic_link_vpc_id='vpc-1234', + classic_link_vpc_security_groups=['classic_link_group'] ) response = self.service_connection.create_launch_configuration(lc) @@ -389,6 +398,8 @@ 'VolumeType': 'atype', 'DeleteOnTermination': 'false', 'Iops': 3000, + 'ClassicLinkVPCId': 'vpc-1234', + 'ClassicLinkVPCSecurityGroups.member.1': 'classic_link_group' }, ignore_params_values=['Version']) @@ -623,6 +634,69 @@ }, ignore_params_values=['Version']) +class TestDetachInstances(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestDetachInstances, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_detach_instances(self): + self.set_http_response(status_code=200) + self.service_connection.detach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'] + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'true', + }, ignore_params_values=['Version']) + + def test_detach_instances_with_decrement_desired_capacity(self): + self.set_http_response(status_code=200) + 
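The TestDetachInstances cases below verify that DetachInstances defaults to decrementing the group's desired capacity and that passing False flips the flag. Usage is a single call; a sketch with placeholder names, passing the decrement flag positionally as the tests do:

from boto.ec2.autoscale import AutoScaleConnection

conn = AutoScaleConnection()
# Detach and shrink the group; passing False instead would make
# Auto Scaling launch replacement instances.
conn.detach_instances('my-asg', ['i-12345678'], True)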
self.service_connection.detach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'], + True + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'true', + }, ignore_params_values=['Version']) + + def test_detach_instances_without_decrement_desired_capacity(self): + self.set_http_response(status_code=200) + self.service_connection.detach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'], + False + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'false', + }, ignore_params_values=['Version']) + + class TestGetAccountLimits(AWSMockServiceTestCase): connection_class = AutoScaleConnection diff -Nru python-boto-2.34.0/tests/unit/ec2/test_connection.py python-boto-2.38.0/tests/unit/ec2/test_connection.py --- python-boto-2.34.0/tests/unit/ec2/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/ec2/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -1092,7 +1092,8 @@ ReservedInstancesConfiguration( availability_zone='us-west-2c', platform='EC2-VPC', - instance_count=3 + instance_count=3, + instance_type='c3.large' ), ] ) @@ -1102,6 +1103,7 @@ 'ReservedInstancesConfigurationSetItemType.0.AvailabilityZone': 'us-west-2c', 'ReservedInstancesConfigurationSetItemType.0.InstanceCount': 3, 'ReservedInstancesConfigurationSetItemType.0.Platform': 'EC2-VPC', + 'ReservedInstancesConfigurationSetItemType.0.InstanceType': 'c3.large', 'ReservedInstancesId.1': '2567o137-8a55-48d6-82fb-7258506bb497' }, ignore_params_values=[ 'AWSAccessKeyId', 'SignatureMethod', @@ -1420,7 +1422,7 @@ def test_unchanged(self): self.assertEqual( self.service_connection._required_auth_capability(), - ['ec2'] + ['hmac-v4'] ) def test_switched(self): @@ -1638,5 +1640,64 @@ self.assertEqual(result.id, 'vol-1a2b3c4d') self.assertTrue(result.encrypted) + +class TestGetClassicLinkInstances(TestEC2ConnectionBase): + def default_body(self): + return b""" + + f4bf0cc6-5967-4687-9355-90ce48394bd3 + + + i-31489bd8 + vpc-9d24f8f8 + + + sg-9b4343fe + + + + + Name + hello + + + + + + """ + def test_get_classic_link_instances(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_classic_link_instances() + self.assertEqual(len(response), 1) + instance = response[0] + self.assertEqual(instance.id, 'i-31489bd8') + self.assertEqual(instance.vpc_id, 'vpc-9d24f8f8') + self.assertEqual(len(instance.groups), 1) + self.assertEqual(instance.groups[0].id, 'sg-9b4343fe') + self.assertEqual(instance.tags, {'Name': 'hello'}) + + + def test_get_classic_link_instances_params(self): + self.set_http_response(status_code=200) + self.ec2.get_all_classic_link_instances( + instance_ids=['id1', 'id2'], + filters={'GroupId': 'sg-9b4343fe'}, + dry_run=True, + next_token='next_token', + max_results=10 + ) + self.assert_request_parameters({ + 'Action': 'DescribeClassicLinkInstances', + 'InstanceId.1': 'id1', + 'InstanceId.2': 'id2', + 'Filter.1.Name': 'GroupId', + 'Filter.1.Value.1': 'sg-9b4343fe', + 'DryRun': 'true', + 'NextToken': 'next_token', + 'MaxResults': 10}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + if __name__ == '__main__': 
unittest.main() diff -Nru python-boto-2.34.0/tests/unit/ec2/test_reservedinstance.py python-boto-2.38.0/tests/unit/ec2/test_reservedinstance.py --- python-boto-2.34.0/tests/unit/ec2/test_reservedinstance.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/ec2/test_reservedinstance.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,44 @@ +from tests.unit import AWSMockServiceTestCase +from boto.ec2.connection import EC2Connection +from boto.ec2.reservedinstance import ReservedInstance + + +class TestReservedInstancesSet(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + return b""" + + + ididididid + t1.micro + 2014-05-03T14:10:10.944Z + 2014-05-03T14:10:11.000Z + 64800000 + 62.5 + 0.0 + 5 + Linux/UNIX + retired + default + USD + Heavy Utilization + + + Hourly + 0.005 + + + +""" + + def test_get_all_reserved_instaces(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_reserved_instances() + + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], ReservedInstance)) + self.assertEquals(response[0].id, 'ididididid') + self.assertEquals(response[0].instance_count, 5) + self.assertEquals(response[0].start, '2014-05-03T14:10:10.944Z') + self.assertEquals(response[0].end, '2014-05-03T14:10:11.000Z') diff -Nru python-boto-2.34.0/tests/unit/emr/test_connection.py python-boto-2.38.0/tests/unit/emr/test_connection.py --- python-boto-2.34.0/tests/unit/emr/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/emr/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -27,7 +27,7 @@ from boto.emr.connection import EmrConnection from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \ - ClusterStatus, ClusterSummaryList, \ + ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \ ClusterSummary, ClusterTimeline, InstanceInfo, \ InstanceList, InstanceGroupInfo, \ InstanceGroup, InstanceGroupList, JobFlow, \ @@ -62,6 +62,7 @@ analytics test + 10 j-aaaaaaaaaaaab @@ -78,6 +79,7 @@ test job + 20 @@ -99,10 +101,13 @@ self.assertTrue(isinstance(response, ClusterSummaryList)) self.assertEqual(len(response.clusters), 2) + self.assertTrue(isinstance(response.clusters[0], ClusterSummary)) self.assertEqual(response.clusters[0].name, 'analytics test') + self.assertEqual(response.clusters[0].normalizedinstancehours, '10') self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus)) + self.assertEqual(response.clusters[0].status.state, 'TERMINATED') self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline)) @@ -110,6 +115,9 @@ self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z') self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z') + self.assertTrue(isinstance(response.clusters[0].status.statechangereason, ClusterStateChangeReason)) + self.assertEqual(response.clusters[0].status.statechangereason.code, 'USER_REQUEST') + self.assertEqual(response.clusters[0].status.statechangereason.message, 'Terminated by user request') def test_list_clusters_created_before(self): self.set_http_response(status_code=200) @@ -558,6 +566,9 @@ false + ec2-184-0-0-1.us-west-1.compute.amazonaws.com + 10 + my-service-role @@ -587,6 +598,9 @@ self.assertEqual(response.status.state, 'TERMINATED') self.assertEqual(response.applications[0].name, 'hadoop') self.assertEqual(response.applications[0].version, '1.0.3') + 
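The EMR assertions around this point track fields newly surfaced on ListClusters and DescribeCluster responses: NormalizedInstanceHours, the state change reason, the master public DNS name and the service role. Reading them is plain attribute access on the parsed response objects, e.g. (cluster id taken from the fixture above):

from boto.emr.connection import EmrConnection

conn = EmrConnection()
for summary in conn.list_clusters().clusters:
    print(summary.name, summary.status.state, summary.normalizedinstancehours)
cluster = conn.describe_cluster('j-aaaaaaaaaaaab')
print(cluster.masterpublicdnsname, cluster.servicerole)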
self.assertEqual(response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(response.normalizedinstancehours, '10') + self.assertEqual(response.servicerole, 'my-service-role') self.assert_request_parameters({ 'Action': 'DescribeCluster', diff -Nru python-boto-2.34.0/tests/unit/emr/test_emr_responses.py python-boto-2.38.0/tests/unit/emr/test_emr_responses.py --- python-boto-2.34.0/tests/unit/emr/test_emr_responses.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/emr/test_emr_responses.py 2015-04-09 18:57:51.000000000 +0000 @@ -42,6 +42,21 @@ 2009-01-28T21:49:16Z STARTING + + + + + + s3://elasticmapreduce/libs/hue/install-hue + + Install Hue + + + + true + + Hue + MyJobFlowName mybucket/subdir/ diff -Nru python-boto-2.34.0/tests/unit/glacier/test_response.py python-boto-2.38.0/tests/unit/glacier/test_response.py --- python-boto-2.34.0/tests/unit/glacier/test_response.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/glacier/test_response.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,35 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import unittest + +from tests.unit import AWSMockServiceTestCase +from boto.glacier.layer1 import Layer1 +from boto.glacier.response import GlacierResponse + +class TestResponse(AWSMockServiceTestCase): + connection_class = Layer1 + + def test_204_body_isnt_passed_to_json(self): + response = self.create_response(status_code=204, header=[('Content-Type', 'application/json')]) + result = GlacierResponse(response, response.getheaders()) + self.assertEquals(result.status, response.status) + +if __name__ == '__main__': + unittest.main() diff -Nru python-boto-2.34.0/tests/unit/iam/test_connection.py python-boto-2.38.0/tests/unit/iam/test_connection.py --- python-boto-2.34.0/tests/unit/iam/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/iam/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -393,3 +393,89 @@ ['create_virtual_mfa_device_result'] ['virtual_mfa_device'] ['serial_number'], 'arn:aws:iam::123456789012:mfa/ExampleName') + +class TestGetAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + + true + true + true + false + 12 + true + 90 + false + true + 12 + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_get_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_account_password_policy() + + self.assert_request_parameters( + { + 'Action': 'GetAccountPasswordPolicy', + }, + ignore_params_values=['Version']) + self.assertEquals(response['get_account_password_policy_response'] + ['get_account_password_policy_result']['password_policy'] + ['minimum_password_length'], '12') + + +class TestUpdateAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_update_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.update_account_password_policy(minimum_password_length=88) + + self.assert_request_parameters( + { + 'Action': 'UpdateAccountPasswordPolicy', + 'MinimumPasswordLength': 88 + }, + ignore_params_values=['Version']) + + +class TestDeleteAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_delete_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.delete_account_password_policy() + + self.assert_request_parameters( + { + 'Action': 'DeleteAccountPasswordPolicy' + }, + ignore_params_values=['Version']) diff -Nru python-boto-2.34.0/tests/unit/__init__.py python-boto-2.38.0/tests/unit/__init__.py --- python-boto-2.34.0/tests/unit/__init__.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -63,11 +63,10 @@ request_params = self.actual_request.params.copy() if ignore_params_values is not None: for param in ignore_params_values: - # We still want to check that the ignore_params_values params - # are in the request parameters, we just don't need to check - # their value.
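The three IAM account password policy tests above map one-to-one onto new IAMConnection methods. A minimal round trip, assuming credentials with account-level IAM permissions:

from boto.iam.connection import IAMConnection

conn = IAMConnection()
conn.update_account_password_policy(minimum_password_length=12)
policy = conn.get_account_password_policy()
result = (policy['get_account_password_policy_response']
          ['get_account_password_policy_result']['password_policy'])
print(result['minimum_password_length'])
conn.delete_account_password_policy()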
- self.assertIn(param, request_params) - del request_params[param] + try: + del request_params[param] + except KeyError: + pass self.assertDictEqual(request_params, params) def set_http_response(self, status_code, reason='', header=[], body=None): diff -Nru python-boto-2.34.0/tests/unit/kinesis/test_kinesis.py python-boto-2.38.0/tests/unit/kinesis/test_kinesis.py --- python-boto-2.34.0/tests/unit/kinesis/test_kinesis.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/kinesis/test_kinesis.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,74 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json +from boto.kinesis.layer1 import KinesisConnection +from tests.unit import AWSMockServiceTestCase + + +class TestKinesis(AWSMockServiceTestCase): + connection_class = KinesisConnection + + def default_body(self): + return b'{}' + + def test_put_record_binary(self): + self.set_http_response(status_code=200) + self.service_connection.put_record('stream-name', + b'\x00\x01\x02\x03\x04\x05', 'partition-key') + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Data'], 'AAECAwQF') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) + + def test_put_record_string(self): + self.set_http_response(status_code=200) + self.service_connection.put_record('stream-name', + 'data', 'partition-key') + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Data'], 'ZGF0YQ==') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) + + def test_put_records(self): + self.set_http_response(status_code=200) + record_binary = { + 'Data': b'\x00\x01\x02\x03\x04\x05', + 'PartitionKey': 'partition-key' + } + record_str = { + 'Data': 'data', + 'PartitionKey': 'partition-key' + } + self.service_connection.put_records(stream_name='stream-name', + records=[record_binary, record_str]) + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Records'][0]['Data'], 'AAECAwQF') + self.assertEqual(body['Records'][1]['Data'], 'ZGF0YQ==') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) diff -Nru python-boto-2.34.0/tests/unit/kms/__init__.py python-boto-2.38.0/tests/unit/kms/__init__.py --- python-boto-2.34.0/tests/unit/kms/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/kms/__init__.py 
2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/unit/kms/test_kms.py python-boto-2.38.0/tests/unit/kms/test_kms.py --- python-boto-2.34.0/tests/unit/kms/test_kms.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/kms/test_kms.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,63 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json +from boto.kms.layer1 import KMSConnection +from tests.unit import AWSMockServiceTestCase + + +class TestKinesis(AWSMockServiceTestCase): + connection_class = KMSConnection + + def default_body(self): + return b'{}' + + def test_binary_input(self): + """ + This test ensures that binary is base64 encoded when it is sent to + the service. + """ + self.set_http_response(status_code=200) + data = b'\x00\x01\x02\x03\x04\x05' + self.service_connection.encrypt(key_id='foo', plaintext=data) + body = json.loads(self.actual_request.body) + self.assertEqual(body['Plaintext'], 'AAECAwQF') + + def test_non_binary_input_for_blobs_fails(self): + """ + This test ensures that only binary is used for blob type parameters. 
+ """ + self.set_http_response(status_code=200) + data = u'\u00e9' + with self.assertRaises(TypeError): + self.service_connection.encrypt(key_id='foo', plaintext=data) + + def test_binary_ouput(self): + """ + This test ensures that the output is base64 decoded before + it is returned to the user. + """ + content = {'Plaintext': 'AAECAwQF'} + self.set_http_response(status_code=200, + body=json.dumps(content).encode('utf-8')) + response = self.service_connection.decrypt(b'some arbitrary value') + self.assertEqual(response['Plaintext'], b'\x00\x01\x02\x03\x04\x05') diff -Nru python-boto-2.34.0/tests/unit/machinelearning/__init__.py python-boto-2.38.0/tests/unit/machinelearning/__init__.py --- python-boto-2.34.0/tests/unit/machinelearning/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/machinelearning/__init__.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff -Nru python-boto-2.34.0/tests/unit/machinelearning/test_machinelearning.py python-boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py --- python-boto-2.34.0/tests/unit/machinelearning/test_machinelearning.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py 2015-04-09 18:57:51.000000000 +0000 @@ -0,0 +1,45 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json +from boto.machinelearning.layer1 import MachineLearningConnection +from tests.unit import AWSMockServiceTestCase + + +class TestMachineLearning(AWSMockServiceTestCase): + connection_class = MachineLearningConnection + + def test_predict(self): + ml_endpoint = 'mymlmodel.amazonaws.com' + self.set_http_response(status_code=200, body=b'') + self.service_connection.predict( + ml_model_id='foo', record={'Foo': 'bar'}, + predict_endpoint=ml_endpoint) + self.assertEqual(self.actual_request.host, ml_endpoint) + + def test_predict_with_scheme_in_endpoint(self): + ml_endpoint = 'mymlmodel.amazonaws.com' + self.set_http_response(status_code=200, body=b'') + self.service_connection.predict( + ml_model_id='foo', record={'Foo': 'bar'}, + predict_endpoint='https://' + ml_endpoint) + self.assertEqual(self.actual_request.host, ml_endpoint) diff -Nru python-boto-2.34.0/tests/unit/mws/test_connection.py python-boto-2.38.0/tests/unit/mws/test_connection.py --- python-boto-2.34.0/tests/unit/mws/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/mws/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -192,6 +192,14 @@ self.assertTrue('throttled' in str(err.reason)) self.assertEqual(int(err.status), 200) + + def test_sandboxify(self): + # Create one-off connection class that has self._sandboxed = True + conn = MWSConnection(https_connection_factory=self.https_connection_factory, + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sandbox=True) + self.assertEqual(conn._sandboxify('a/bogus/path'), 'a/bogus_Sandbox/path') if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.34.0/tests/unit/provider/test_provider.py python-boto-2.38.0/tests/unit/provider/test_provider.py --- python-boto-2.34.0/tests/unit/provider/test_provider.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/provider/test_provider.py 2015-04-09 18:57:51.000000000 +0000 @@ -104,6 +104,17 @@ self.assertEqual(p.secret_key, 'env_secret_key') self.assertEqual(p.security_token, 'env_security_token') + def test_no_credentials_provided(self): + p = provider.Provider( + 'aws', + provider.NO_CREDENTIALS_PROVIDED, + provider.NO_CREDENTIALS_PROVIDED, + provider.NO_CREDENTIALS_PROVIDED + ) + self.assertEqual(p.access_key, provider.NO_CREDENTIALS_PROVIDED) + self.assertEqual(p.secret_key, provider.NO_CREDENTIALS_PROVIDED) + self.assertEqual(p.security_token, provider.NO_CREDENTIALS_PROVIDED) + def test_config_profile_values_are_used(self): self.config = { 'profile dev': { diff -Nru python-boto-2.34.0/tests/unit/route53/test_connection.py python-boto-2.38.0/tests/unit/route53/test_connection.py --- python-boto-2.34.0/tests/unit/route53/test_connection.py 2014-10-23 16:19:50.000000000 +0000 +++ python-boto-2.38.0/tests/unit/route53/test_connection.py 2015-04-09 18:57:51.000000000 +0000 @@ -101,6 +101,16 @@ # Unpatch. self.service_connection._retry_handler = orig_retry + def test_private_zone_invalid_vpc_400(self): + self.set_http_response(status_code=400, header=[ + ['Code', 'InvalidVPCId'], + ]) + + with self.assertRaises(DNSServerError) as err: + self.service_connection.create_hosted_zone("example.com.", + private_zone=True) + self.assertTrue('It failed.' 
in str(err.exception)) + @attr(route53=True) class TestCreateZoneRoute53(AWSMockServiceTestCase): @@ -118,6 +128,7 @@ aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + false 2 @@ -147,10 +158,76 @@ def test_create_hosted_zone(self): self.set_http_response(status_code=201) - response = self.service_connection.create_hosted_zone("example.com.", "my_ref", "this is a comment") + response = self.service_connection.create_hosted_zone("example.com.", + "my_ref", + "a comment") + + self.assertEqual(response['CreateHostedZoneResponse'] + ['DelegationSet']['NameServers'], + ['ns-100.awsdns-01.com', + 'ns-1000.awsdns-01.co.uk', + 'ns-1000.awsdns-01.org', + 'ns-900.awsdns-01.net']) + + self.assertEqual(response['CreateHostedZoneResponse'] + ['HostedZone']['Config']['PrivateZone'], + u'false') + - self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'], - ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net']) +@attr(route53=True) +class TestCreatePrivateZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreatePrivateZoneRoute53, self).setUp() + + def default_body(self): + return b""" + + + /hostedzone/Z11111 + example.com. + + vpc-1a2b3c4d + us-east-1 + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + true + + 2 + + + /change/C1111111111111 + PENDING + 2014-02-02T10:19:29.928Z + + + + ns-100.awsdns-01.com + ns-1000.awsdns-01.co.uk + ns-1000.awsdns-01.org + ns-900.awsdns-01.net + + + + """ + + def test_create_private_zone(self): + self.set_http_response(status_code=201) + r = self.service_connection.create_hosted_zone("example.com.", + private_zone=True, + vpc_id='vpc-1a2b3c4d', + vpc_region='us-east-1' + ) + + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['Config']['PrivateZone'], u'true') + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['VPC']['VPCId'], u'vpc-1a2b3c4d') + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['VPC']['VPCRegion'], u'us-east-1') @attr(route53=True) @@ -243,10 +320,16 @@ self.set_http_response(status_code=201) response = self.service_connection.get_hosted_zone("Z1111") - self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Id'], '/hostedzone/Z1111') - self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Name'], 'example.com.') - self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'], - ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk']) + self.assertEqual(response['GetHostedZoneResponse'] + ['HostedZone']['Id'], + '/hostedzone/Z1111') + self.assertEqual(response['GetHostedZoneResponse'] + ['HostedZone']['Name'], + 'example.com.') + self.assertEqual(response['GetHostedZoneResponse'] + ['DelegationSet']['NameServers'], + ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', + 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk']) @attr(route53=True) @@ -336,7 +419,9 @@ def test_get_all_rr_sets(self): self.set_http_response(status_code=200) - response = self.service_connection.get_all_rrsets("Z1111", "A", "example.com.") + response = self.service_connection.get_all_rrsets("Z1111", + "A", + "example.com.") self.assertIn(self.actual_request.path, ("/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.", @@ -530,6 +615,36 @@ @attr(route53=True) +class TestGetCheckerIpRanges(AWSMockServiceTestCase): + connection_class = Route53Connection + + def default_body(self): + return b""" + + + 54.183.255.128/26 + 
diff -Nru python-boto-2.34.0/tests/unit/s3/test_key.py python-boto-2.38.0/tests/unit/s3/test_key.py
--- python-boto-2.34.0/tests/unit/s3/test_key.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/s3/test_key.py 2015-04-09 18:57:51.000000000 +0000
@@ -103,6 +103,54 @@
         k.set_contents_from_string('test')
         k.bucket.list.assert_not_called()
 
+    def test_change_storage_class(self):
+        self.set_http_response(status_code=200)
+        b = Bucket(self.service_connection, 'mybucket')
+        k = b.get_key('fookey')
+
+        # Mock out Key.copy so we can record calls to it
+        k.copy = mock.MagicMock()
+        # Mock out the bucket so we don't actually need to have fake responses
+        k.bucket = mock.MagicMock()
+        k.bucket.name = 'mybucket'
+
+        self.assertEqual(k.storage_class, 'STANDARD')
+
+        # The default change_storage_class call should result in a copy to our
+        # bucket
+        k.change_storage_class('REDUCED_REDUNDANCY')
+        k.copy.assert_called_with(
+            'mybucket',
+            'fookey',
+            reduced_redundancy=True,
+            preserve_acl=True,
+            validate_dst_bucket=True,
+        )
+
+    def test_change_storage_class_new_bucket(self):
+        self.set_http_response(status_code=200)
+        b = Bucket(self.service_connection, 'mybucket')
+        k = b.get_key('fookey')
+
+        # Mock out Key.copy so we can record calls to it
+        k.copy = mock.MagicMock()
+        # Mock out the bucket so we don't actually need to have fake responses
+        k.bucket = mock.MagicMock()
+        k.bucket.name = 'mybucket'
+
+        self.assertEqual(k.storage_class, 'STANDARD')
+        # Specifying a different dst_bucket should result in a copy to the new
+        # bucket
+        k.copy.reset_mock()
+        k.change_storage_class('REDUCED_REDUNDANCY', dst_bucket='yourbucket')
+        k.copy.assert_called_with(
+            'yourbucket',
+            'fookey',
+            reduced_redundancy=True,
+            preserve_acl=True,
+            validate_dst_bucket=True,
+        )
+
 
 def counter(fn):
     def _wrapper(*args, **kwargs):
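The two tests above pin down how Key.change_storage_class delegates to Key.copy. A hedged sketch of the same call from user code (bucket and key names are placeholders; a real call performs an S3 copy):

    import boto

    s3 = boto.connect_s3()
    key = s3.get_bucket('mybucket').get_key('fookey')

    # Rewrites the object in place with the new storage class...
    key.change_storage_class('REDUCED_REDUNDANCY')
    # ...or copies it into another bucket while changing the class.
    key.change_storage_class('REDUCED_REDUNDANCY', dst_bucket='yourbucket')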
diff -Nru python-boto-2.34.0/tests/unit/ses/test_identity.py python-boto-2.38.0/tests/unit/ses/test_identity.py
--- python-boto-2.34.0/tests/unit/ses/test_identity.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/ses/test_identity.py 2015-04-09 18:57:51.000000000 +0000
@@ -39,7 +39,7 @@
   <GetIdentityDkimAttributesResult>
     <DkimAttributes>
       <entry>
-        <key>amazon.com</key>
+        <key>test@amazon.com</key>
         <value>
           <DkimEnabled>true</DkimEnabled>
           <DkimVerificationStatus>Success</DkimVerificationStatus>
@@ -50,6 +50,13 @@
           </DkimTokens>
         </value>
       </entry>
+      <entry>
+        <key>secondtest@amazon.com</key>
+        <value>
+          <DkimEnabled>false</DkimEnabled>
+          <DkimVerificationStatus>NotStarted</DkimVerificationStatus>
+        </value>
+      </entry>
     </DkimAttributes>
   </GetIdentityDkimAttributesResult>
@@ -61,13 +68,17 @@
         self.set_http_response(status_code=200)
 
         response = self.service_connection\
-            .get_identity_dkim_attributes(['test@amazon.com'])
+            .get_identity_dkim_attributes(['test@amazon.com', 'secondtest@amazon.com'])
 
         response = response['GetIdentityDkimAttributesResponse']
         result = response['GetIdentityDkimAttributesResult']
-        attributes = result['DkimAttributes']['entry']['value']
+
+        first_entry = result['DkimAttributes'][0]
+        entry_key = first_entry['key']
+        attributes = first_entry['value']
         tokens = attributes['DkimTokens']
 
+        self.assertEqual(entry_key, 'test@amazon.com')
         self.assertEqual(ListElement, type(tokens))
         self.assertEqual(3, len(tokens))
         self.assertEqual('vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f',
@@ -77,6 +88,16 @@
         self.assertEqual('wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2',
                          tokens[2])
 
+        second_entry = result['DkimAttributes'][1]
+        entry_key = second_entry['key']
+        attributes = second_entry['value']
+        dkim_enabled = attributes['DkimEnabled']
+        dkim_verification_status = attributes['DkimVerificationStatus']
+
+        self.assertEqual(entry_key, 'secondtest@amazon.com')
+        self.assertEqual(dkim_enabled, 'false')
+        self.assertEqual(dkim_verification_status, 'NotStarted')
+
 
 class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase):
     connection_class = SESConnection
diff -Nru python-boto-2.34.0/tests/unit/sqs/test_message.py python-boto-2.38.0/tests/unit/sqs/test_message.py
--- python-boto-2.34.0/tests/unit/sqs/test_message.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/sqs/test_message.py 2015-04-09 18:57:51.000000000 +0000
@@ -23,6 +23,7 @@
 
 from boto.sqs.message import MHMessage
 from boto.sqs.message import RawMessage
+from boto.sqs.message import Message
 from boto.sqs.bigmessage import BigMessage
 from boto.exception import SQSDecodeError
 
@@ -69,6 +70,20 @@
         self.assertEquals(message.id, sample_value)
         self.assertEquals(message.receipt_handle, sample_value)
 
+    @attr(sqs=True)
+    def test_encode_bytes_message(self):
+        message = Message()
+        body = b'\x00\x01\x02\x03\x04\x05'
+        message.set_body(body)
+        self.assertEqual(message.get_body_encoded(), 'AAECAwQF')
+
+    @attr(sqs=True)
+    def test_encode_string_message(self):
+        message = Message()
+        body = 'hello world'
+        message.set_body(body)
+        self.assertEqual(message.get_body_encoded(), 'aGVsbG8gd29ybGQ=')
+
 
 class TestBigMessage(unittest.TestCase):
diff -Nru python-boto-2.34.0/tests/unit/swf/test_layer2_domain.py python-boto-2.38.0/tests/unit/swf/test_layer2_domain.py
--- python-boto-2.34.0/tests/unit/swf/test_layer2_domain.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/swf/test_layer2_domain.py 2015-04-09 18:57:51.000000000 +0000
@@ -11,6 +11,7 @@
         self.domain = Domain(name='test-domain', description='My test domain')
         self.domain.aws_access_key_id = 'inheritable access key'
         self.domain.aws_secret_access_key = 'inheritable secret key'
+        self.domain.region = 'test-region'
 
     def test_domain_instantiation(self):
         self.assertEquals('test-domain', self.domain.name)
@@ -47,6 +48,7 @@
         for activity_type in activity_types:
             self.assertIsInstance(activity_type, ActivityType)
             self.assertTrue(activity_type.name in expected_names)
+            self.assertEquals(self.domain.region, activity_type.region)
 
     def test_domain_list_workflows(self):
         self.domain._swf.list_workflow_types.return_value = {
@@ -68,6 +70,7 @@
         self.assertEquals(self.domain.aws_access_key_id, workflow_type.aws_access_key_id)
         self.assertEquals(self.domain.aws_secret_access_key, workflow_type.aws_secret_access_key)
         self.assertEquals(self.domain.name, workflow_type.domain)
+        self.assertEquals(self.domain.region, workflow_type.region)
 
     def test_domain_list_executions(self):
         self.domain._swf.list_open_workflow_executions.return_value = {
@@ -107,6 +110,7 @@
         self.assertEquals(self.domain.aws_access_key_id, wf_execution.aws_access_key_id)
         self.assertEquals(self.domain.aws_secret_access_key, wf_execution.aws_secret_access_key)
         self.assertEquals(self.domain.name, wf_execution.domain)
+        self.assertEquals(self.domain.region, wf_execution.region)
 
 if __name__ == '__main__':
     unittest.main()
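The expected base64 strings in the SQS encoding tests above can be checked directly. A standalone sketch (standard library only, not boto itself):

    import base64

    # Message.get_body_encoded() returns the base64 form the tests assert on.
    print(base64.b64encode(b'\x00\x01\x02\x03\x04\x05').decode('ascii'))
    # -> AAECAwQF
    print(base64.b64encode('hello world'.encode('utf-8')).decode('ascii'))
    # -> aGVsbG8gd29ybGQ=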
diff -Nru python-boto-2.34.0/tests/unit/test_connection.py python-boto-2.38.0/tests/unit/test_connection.py
--- python-boto-2.34.0/tests/unit/test_connection.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/test_connection.py 2015-04-09 18:57:51.000000000 +0000
@@ -387,6 +387,26 @@
                                  'POST')
         self.assertEqual(resp.read(), b"{'test': 'success'}")
 
+    def test_unhandled_exception(self):
+        HTTPretty.register_uri(HTTPretty.POST,
+                               'https://%s/temp_exception/' % self.region.endpoint,
+                               responses=[])
+
+        def fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                            source_address=None):
+            raise socket.timeout('fake error')
+
+        socket.create_connection = fake_connection
+
+        conn = self.region.connect(aws_access_key_id='access_key',
+                                   aws_secret_access_key='secret')
+        conn.num_retries = 0
+        with self.assertRaises(socket.error):
+            resp = conn.make_request('myCmd1',
+                                     {'par1': 'foo', 'par2': 'baz'},
+                                     '/temp_exception/',
+                                     'POST')
+
     def test_connection_close(self):
         """Check connection re-use after close header is received"""
         HTTPretty.register_uri(HTTPretty.POST,
@@ -504,6 +524,16 @@
                          {'Some-Header': 'should%20be%20url%20encoded',
                           'User-Agent': UserAgent})
 
+    def test_content_length_str(self):
+        request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
+                              None, {}, {}, 'Body')
+        mock_connection = mock.Mock()
+        request.authorize(mock_connection)
+
+        # Ensure the Content-Length header is a str. This is more explicit
+        # than relying on other code to cast the value later. (Python 2.7.0,
+        # for example, assumes headers are of type str.)
+        self.assertIsInstance(request.headers['Content-Length'], str)
 
 if __name__ == '__main__':
     unittest.main()
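What test_content_length_str above checks can be reproduced outside the suite. A hedged sketch (it uses a bare Mock as the connection, exactly as the test does; the mock import path varies by Python version):

    import mock  # on Python 3: from unittest import mock

    from boto.connection import HTTPRequest

    request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
                          None, {}, {}, 'Body')
    request.authorize(mock.Mock())
    # authorize() stamps Content-Length (among other headers) as a str,
    # not an int.
    assert isinstance(request.headers['Content-Length'], str)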
diff -Nru python-boto-2.34.0/tests/unit/vpc/test_vpc.py python-boto-2.38.0/tests/unit/vpc/test_vpc.py
--- python-boto-2.34.0/tests/unit/vpc/test_vpc.py 2014-10-23 16:19:50.000000000 +0000
+++ python-boto-2.38.0/tests/unit/vpc/test_vpc.py 2015-04-09 18:57:51.000000000 +0000
@@ -3,6 +3,7 @@
 
 from tests.unit import AWSMockServiceTestCase
 from boto.vpc import VPCConnection, VPC
+from boto.ec2.securitygroup import SecurityGroup
 
 DESCRIBE_VPCS = b'''
@@ -138,5 +139,229 @@
                                              'Version'])
         self.assertEquals(api_response, True)
 
+
+class TestGetAllClassicLinkVpc(AWSMockServiceTestCase):
+
+    connection_class = VPCConnection
+
+    def default_body(self):
+        return b"""
+        <DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
+            <vpcSet>
+                <item>
+                    <vpcId>vpc-6226ab07</vpcId>
+                    <classicLinkEnabled>false</classicLinkEnabled>
+                    <tagSet>
+                        <item>
+                            <key>Name</key>
+                            <value>hello</value>
+                        </item>
+                    </tagSet>
+                </item>
+                <item>
+                    <vpcId>vpc-9d24f8f8</vpcId>
+                    <classicLinkEnabled>true</classicLinkEnabled>
+                    <tagSet/>
+                </item>
+            </vpcSet>
+        </DescribeVpcClassicLinkResponse>
+        """
+
+    def test_get_all_classic_link_vpcs(self):
+        self.set_http_response(status_code=200)
+        response = self.service_connection.get_all_classic_link_vpcs()
+        self.assertEqual(len(response), 2)
+        vpc = response[0]
+        self.assertEqual(vpc.id, 'vpc-6226ab07')
+        self.assertEqual(vpc.classic_link_enabled, 'false')
+        self.assertEqual(vpc.tags, {'Name': 'hello'})
+
+    def test_get_all_classic_link_vpcs_params(self):
+        self.set_http_response(status_code=200)
+        self.service_connection.get_all_classic_link_vpcs(
+            vpc_ids=['id1', 'id2'],
+            filters={'GroupId': 'sg-9b4343fe'},
+            dry_run=True,
+        )
+        self.assert_request_parameters({
+            'Action': 'DescribeVpcClassicLink',
+            'VpcId.1': 'id1',
+            'VpcId.2': 'id2',
+            'Filter.1.Name': 'GroupId',
+            'Filter.1.Value.1': 'sg-9b4343fe',
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestVpcClassicLink(AWSMockServiceTestCase):
+    connection_class = VPCConnection
+
+    def setUp(self):
+        super(TestVpcClassicLink, self).setUp()
+        self.vpc = VPC(self.service_connection)
+        self.vpc_id = 'myid'
+        self.vpc.id = self.vpc_id
+
+
+class TestAttachClassicLinkVpc(TestVpcClassicLink):
+    def default_body(self):
+        return b"""
+        <AttachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>88673bdf-cd16-40bf-87a1-6132fec47257</requestId>
+            <return>true</return>
+        </AttachClassicLinkVpcResponse>
+        """
+
+    def test_attach_classic_link_instance_string_groups(self):
+        groups = ['sg-foo', 'sg-bar']
+
+        self.set_http_response(status_code=200)
+        response = self.vpc.attach_classic_instance(
+            instance_id='my_instance_id',
+            groups=groups,
+            dry_run=True
+        )
+        self.assertTrue(response)
+        self.assert_request_parameters({
+            'Action': 'AttachClassicLinkVpc',
+            'VpcId': self.vpc_id,
+            'InstanceId': 'my_instance_id',
+            'SecurityGroupId.1': 'sg-foo',
+            'SecurityGroupId.2': 'sg-bar',
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+    def test_attach_classic_link_instance_object_groups(self):
+        sec_group_1 = SecurityGroup()
+        sec_group_1.id = 'sg-foo'
+
+        sec_group_2 = SecurityGroup()
+        sec_group_2.id = 'sg-bar'
+
+        groups = [sec_group_1, sec_group_2]
+
+        self.set_http_response(status_code=200)
+        response = self.vpc.attach_classic_instance(
+            instance_id='my_instance_id',
+            groups=groups,
+            dry_run=True
+        )
+        self.assertTrue(response)
+        self.assert_request_parameters({
+            'Action': 'AttachClassicLinkVpc',
+            'VpcId': self.vpc_id,
+            'InstanceId': 'my_instance_id',
+            'SecurityGroupId.1': 'sg-foo',
+            'SecurityGroupId.2': 'sg-bar',
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestDetachClassicLinkVpc(TestVpcClassicLink):
+    def default_body(self):
+        return b"""
+        <DetachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>5565033d-1321-4eef-b121-6aa46f152ed7</requestId>
+            <return>true</return>
+        </DetachClassicLinkVpcResponse>
+        """
+
+    def test_detach_classic_link_instance(self):
+        self.set_http_response(status_code=200)
+        response = self.vpc.detach_classic_instance(
+            instance_id='my_instance_id',
+            dry_run=True
+        )
+        self.assertTrue(response)
+        self.assert_request_parameters({
+            'Action': 'DetachClassicLinkVpc',
+            'VpcId': self.vpc_id,
+            'InstanceId': 'my_instance_id',
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestEnableClassicLinkVpc(TestVpcClassicLink):
+    def default_body(self):
+        return b"""
+        <EnableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
+            <return>true</return>
+        </EnableVpcClassicLinkResponse>
+        """
+
+    def test_enable_classic_link(self):
+        self.set_http_response(status_code=200)
+        response = self.vpc.enable_classic_link(
+            dry_run=True
+        )
+        self.assertTrue(response)
+        self.assert_request_parameters({
+            'Action': 'EnableVpcClassicLink',
+            'VpcId': self.vpc_id,
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestDisableClassicLinkVpc(TestVpcClassicLink):
+    def default_body(self):
+        return b"""
+        <DisableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
+            <return>true</return>
+        </DisableVpcClassicLinkResponse>
+        """
+
+    def test_disable_classic_link(self):
+        self.set_http_response(status_code=200)
+        response = self.vpc.disable_classic_link(
+            dry_run=True
+        )
+        self.assertTrue(response)
+        self.assert_request_parameters({
+            'Action': 'DisableVpcClassicLink',
+            'VpcId': self.vpc_id,
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestUpdateClassicLinkVpc(TestVpcClassicLink):
+    def default_body(self):
+        return b"""
+        <DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+            <requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
+            <vpcSet>
+                <item>
+                    <vpcId>myid</vpcId>
+                    <classicLinkEnabled>true</classicLinkEnabled>
+                    <tagSet/>
+                </item>
+            </vpcSet>
+        </DescribeVpcClassicLinkResponse>
+        """
+
+    def test_vpc_update_classic_link_enabled(self):
+        self.vpc.classic_link_enabled = False
+        self.set_http_response(status_code=200)
+        self.vpc.update_classic_link_enabled(
+            dry_run=True,
+            validate=True
+        )
+        self.assert_request_parameters({
+            'Action': 'DescribeVpcClassicLink',
+            'VpcId.1': self.vpc_id,
+            'DryRun': 'true'},
+            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+                                  'SignatureVersion', 'Timestamp', 'Version'])
+        self.assertEqual(self.vpc.classic_link_enabled, 'true')
+
 
 if __name__ == '__main__':
     unittest.main()
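The ClassicLink tests above exercise a small client surface on VPC objects. A hedged usage sketch (method names are taken straight from the tests; the region, instance id and security group are placeholders, and real calls need AWS credentials):

    import boto.vpc

    conn = boto.vpc.connect_to_region('us-east-1')

    # DescribeVpcClassicLink: VPCs annotated with their ClassicLink state.
    for vpc in conn.get_all_classic_link_vpcs():
        print(vpc.id, vpc.classic_link_enabled)

    vpc = conn.get_all_vpcs()[0]
    vpc.enable_classic_link()                       # EnableVpcClassicLink
    vpc.attach_classic_instance(                    # AttachClassicLinkVpc
        instance_id='i-12345678', groups=['sg-foo'])
    vpc.detach_classic_instance(                    # DetachClassicLinkVpc
        instance_id='i-12345678')
    vpc.update_classic_link_enabled(validate=True)  # refreshes classic_link_enabled
    vpc.disable_classic_link()                      # DisableVpcClassicLink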