diff -Nru python-boto-2.20.1/bin/cq python-boto-2.29.1/bin/cq --- python-boto-2.20.1/bin/cq 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/cq 2014-05-30 20:49:34.000000000 +0000 @@ -57,6 +57,9 @@ region = a if region: c = boto.sqs.connect_to_region(region) + if c is None: + print 'Invalid region (%s)' % region + sys.exit(1) else: c = SQSConnection() if queue_name: diff -Nru python-boto-2.20.1/bin/elbadmin python-boto-2.29.1/bin/elbadmin --- python-boto-2.20.1/bin/elbadmin 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/elbadmin 2014-05-30 20:49:34.000000000 +0000 @@ -72,6 +72,10 @@ for b in elb.get_all_load_balancers(): print "%-20s %s" % (b.name, b.dns_name) +def check_valid_region(conn, region): + if conn is None: + print 'Invalid region (%s)' % region + sys.exit(1) def get(elb, name): """Get details about ELB """ @@ -108,18 +112,20 @@ print # Make map of all instance Id's to Name tags + import boto if not options.region: ec2 = boto.connect_ec2() else: - import boto.ec2.elb ec2 = boto.ec2.connect_to_region(options.region) + check_valid_region(ec2, options.region) instance_health = b.get_instance_health() instances = [state.instance_id for state in instance_health] - names = {} - for i in ec2.get_only_instances(instances): - names[i.id] = i.tags.get('Name', '') + names = dict((k,'') for k in instances) + for i in ec2.get_only_instances(): + if i.id in instances: + names[i.id] = i.tags.get('Name', '') name_column_width = max([4] + [len(v) for k,v in names.iteritems()]) + 2 @@ -254,6 +260,7 @@ else: import boto.ec2.elb elb = boto.ec2.elb.connect_to_region(options.region) + check_valid_region(elb, options.region) print "%s" % (elb.region.endpoint) diff -Nru python-boto-2.20.1/bin/glacier python-boto-2.29.1/bin/glacier --- python-boto-2.20.1/bin/glacier 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/glacier 2014-05-30 20:49:34.000000000 +0000 @@ -84,10 +84,14 @@ def connect(region, debug_level=0, access_key=None, 
secret_key=None): """ Connect to a specific region """ - return connect_to_region(region, - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - debug=debug_level) + layer2 = connect_to_region(region, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + debug=debug_level) + if layer2 is None: + print 'Invalid region (%s)' % region + sys.exit(1) + return layer2 def list_vaults(region, access_key=None, secret_key=None): diff -Nru python-boto-2.20.1/bin/lss3 python-boto-2.29.1/bin/lss3 --- python-boto-2.20.1/bin/lss3 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/lss3 2014-05-30 20:49:34.000000000 +0000 @@ -1,14 +1,17 @@ #!/usr/bin/env python import boto +from boto.exception import S3ResponseError from boto.s3.connection import OrdinaryCallingFormat + def sizeof_fmt(num): - for x in ['b ','KB','MB','GB','TB', 'XB']: + for x in ['b ', 'KB', 'MB', 'GB', 'TB', 'XB']: if num < 1024.0: return "%3.1f %s" % (num, x) num /= 1024.0 return "%3.1f %s" % (num, x) + def list_bucket(b, prefix=None, marker=None): """List everything in a bucket""" from boto.s3.prefix import Prefix @@ -39,45 +42,63 @@ elif g.permission == "FULL_CONTROL": mode = "-rwxrwx" if isinstance(k, Key): - print "%s\t%s\t%010s\t%s" % (mode, k.last_modified, - sizeof_fmt(size), k.name) + print "%s\t%s\t%010s\t%s" % (mode, k.last_modified, + sizeof_fmt(size), k.name) else: #If it's not a Key object, it doesn't have a last_modified time, so #print nothing instead - print "%s\t%s\t%010s\t%s" % (mode, ' '*24, - sizeof_fmt(size), k.name) + print "%s\t%s\t%010s\t%s" % (mode, ' ' * 24, + sizeof_fmt(size), k.name) total += size - print "="*80 + print "=" * 80 print "\t\tTOTAL: \t%010s \t%i Files" % (sizeof_fmt(total), num) -def list_buckets(s3): + +def list_buckets(s3, display_tags=False): """List all the buckets""" for b in s3.get_all_buckets(): print b.name + if display_tags: + try: + tags = b.get_tags() + for tag in tags[0]: + print " %s:%s" % (tag.key, tag.value) + 
except S3ResponseError as e: + if e.status != 404: + raise -if __name__ == "__main__": + +def main(): import optparse import sys - if len(sys.argv) < 2: - list_buckets(boto.connect_s3()) - sys.exit(0) - - parser = optparse.OptionParser() + usage = "usage: %prog [options] [BUCKET1] [BUCKET2]" + description = "List all S3 buckets OR list keys in the named buckets" + parser = optparse.OptionParser(description=description, usage=usage) parser.add_option('-m', '--marker', help='The S3 key where the listing starts after it.') + parser.add_option('-t', '--tags', action='store_true', + help='Display tags when listing all buckets.') options, buckets = parser.parse_args() marker = options.marker + if not buckets: + list_buckets(boto.connect_s3(), options.tags) + sys.exit(0) + + if options.tags: + print "-t option only works for the overall bucket list" + sys.exit(1) + pairs = [] mixedCase = False for name in buckets: - if "/" in name: - pairs.append(name.split("/",1)) - else: - pairs.append([name, None]) - if pairs[-1][0].lower() != pairs[-1][0]: - mixedCase = True + if "/" in name: + pairs.append(name.split("/", 1)) + else: + pairs.append([name, None]) + if pairs[-1][0].lower() != pairs[-1][0]: + mixedCase = True if mixedCase: s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat()) @@ -86,3 +107,7 @@ for name, prefix in pairs: list_bucket(s3.get_bucket(name), prefix, marker=marker) + + +if __name__ == "__main__": + main() diff -Nru python-boto-2.20.1/bin/mturk python-boto-2.29.1/bin/mturk --- python-boto-2.20.1/bin/mturk 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/mturk 2014-05-30 20:49:34.000000000 +0000 @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2012 Kodi Arfer +# Copyright 2012, 2014 Kodi Arfer # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -40,6 +40,8 @@ nicknames = {} nickname_pool = set(string.ascii_lowercase) +get_assignments_page_size = 100 
+ time_units = dict( s = 1, min = 60, @@ -281,10 +283,20 @@ nicknames = {k: v for k, v in nicknames.items() if v != hit} def list_assignments(hit, only_reviewable = False): - assignments = map(digest_assignment, con.get_assignments( - hit_id = hit, - page_size = 100, - status = 'Submitted' if only_reviewable else None)) + # Accumulate all relevant assignments, one page of results at + # a time. + assignments = [] + page = 1 + while True: + rs = con.get_assignments( + hit_id = hit, + page_size = get_assignments_page_size, + page_number = page, + status = 'Submitted' if only_reviewable else None) + assignments += map(digest_assignment, rs) + if len(assignments) >= int(rs.TotalNumResults): + break + page += 1 if interactive: print json.dumps(assignments, sort_keys = True, indent = 4) print ' '.join([a['AssignmentId'] for a in assignments]) @@ -315,6 +327,16 @@ def notify_workers(subject, text, workers): con.notify_workers(workers, subject, text) +def give_qualification(qualification, workers, value = 1, notify = True): + for w in workers: + con.assign_qualification(qualification, w, value, notify) + if interactive: print 'Gave to', w + +def revoke_qualification(qualification, workers, message = None): + for w in workers: + con.revoke_qualification(w, qualification, message) + if interactive: print 'Revoked from', w + # -------------------------------------------------- # Mainline code # -------------------------------------------------- @@ -332,10 +354,10 @@ sub = subs.add_parser('hit', help = 'get information about a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to show') sub.set_defaults(f = show_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('hits', help = 'list all your HITs') @@ -345,7 +367,7 @@ help = 'create a new HIT (external questions only)', epilog = example_config_file, formatter_class = argparse.RawDescriptionHelpFormatter) - sub.add_argument('json_path', + 
sub.add_argument('JSON_PATH', help = 'path to JSON configuration file for the HIT') sub.add_argument('-u', '--question-url', dest = 'question_url', metavar = 'URL', @@ -357,13 +379,13 @@ type = float, metavar = 'PRICE', help = 'reward amount, in USD') sub.set_defaults(f = make_hit, a = lambda: dict( - unjson(args.json_path).items() + [(k, getattr(args, k)) + unjson(args.JSON_PATH).items() + [(k, getattr(args, k)) for k in ('question_url', 'assignments', 'reward') if getattr(args, k) is not None])) sub = subs.add_parser('extend', help = 'add assignments or time to a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to extend') sub.add_argument('-a', '--assignments', dest = 'assignments', metavar = 'N', type = int, @@ -372,68 +394,95 @@ metavar = 'T', help = 'amount of time to add to the expiration date') sub.set_defaults(f = extend_hit, a = lambda: - [get_hitid(args.hit), args.assignments, + [get_hitid(args.HIT), args.assignments, args.time and parse_duration(args.time)]) sub = subs.add_parser('expire', help = 'force a HIT to expire without deleting it') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to expire') sub.set_defaults(f = expire_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('rm', help = 'delete a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to delete') sub.set_defaults(f = delete_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('as', help = "list a HIT's submitted assignments") - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to get assignments for') sub.add_argument('-r', '--reviewable', dest = 'only_reviewable', action = 'store_true', help = 'show only unreviewed assignments') sub.set_defaults(f = list_assignments, a = lambda: - [get_hitid(args.hit), args.only_reviewable]) + [get_hitid(args.HIT), 
args.only_reviewable]) for command, fun, helpmsg in [ ('approve', approve_assignments, 'approve assignments'), ('reject', reject_assignments, 'reject assignments'), ('unreject', unreject_assignments, 'approve previously rejected assignments')]: sub = subs.add_parser(command, help = helpmsg) - sub.add_argument('assignment', nargs = '+', + sub.add_argument('ASSIGNMENT', nargs = '+', help = 'ID of an assignment') sub.add_argument('-m', '--message', dest = 'message', metavar = 'TEXT', help = 'feedback message shown to workers') sub.set_defaults(f = fun, a = lambda: - [args.message, args.assignment]) + [args.message, args.ASSIGNMENT]) sub = subs.add_parser('bonus', help = 'give some workers a bonus') - sub.add_argument('amount', type = float, + sub.add_argument('AMOUNT', type = float, help = 'bonus amount, in USD') - sub.add_argument('message', + sub.add_argument('MESSAGE', help = 'the reason for the bonus (shown to workers in an email sent by MTurk)') - sub.add_argument('widaid', nargs = '+', + sub.add_argument('WIDAID', nargs = '+', help = 'a WORKER_ID,ASSIGNMENT_ID pair') sub.set_defaults(f = grant_bonus, a = lambda: - [args.message, args.amount, - [p.split(',') for p in args.widaid]]) + [args.MESSAGE, args.AMOUNT, + [p.split(',') for p in args.WIDAID]]) sub = subs.add_parser('notify', help = 'send a message to some workers') - sub.add_argument('subject', + sub.add_argument('SUBJECT', help = 'subject of the message') - sub.add_argument('message', + sub.add_argument('MESSAGE', help = 'text of the message') - sub.add_argument('worker', nargs = '+', + sub.add_argument('WORKER', nargs = '+', help = 'ID of a worker') sub.set_defaults(f = notify_workers, a = lambda: - [args.subject, args.message, args.worker]) + [args.SUBJECT, args.MESSAGE, args.WORKER]) + + sub = subs.add_parser('give-qual', + help = 'give a qualification to some workers') + sub.add_argument('QUAL', + help = 'ID of the qualification') + sub.add_argument('WORKER', nargs = '+', + help = 'ID of a worker') + 
sub.add_argument('-v', '--value', dest = 'value', + metavar = 'N', type = int, default = 1, + help = 'value of the qualification') + sub.add_argument('--dontnotify', dest = 'notify', + action = 'store_false', default = True, + help = "don't notify workers") + sub.set_defaults(f = give_qualification, a = lambda: + [args.QUAL, args.WORKER, args.value, args.notify]) + + sub = subs.add_parser('revoke-qual', + help = 'revoke a qualification from some workers') + sub.add_argument('QUAL', + help = 'ID of the qualification') + sub.add_argument('WORKER', nargs = '+', + help = 'ID of a worker') + sub.add_argument('-m', '--message', dest = 'message', + metavar = 'TEXT', + help = 'the reason the qualification was revoked (shown to workers in an email sent by MTurk)') + sub.set_defaults(f = revoke_qualification, a = lambda: + [args.QUAL, args.WORKER, args.message]) args = parser.parse_args() diff -Nru python-boto-2.20.1/bin/route53 python-boto-2.29.1/bin/route53 --- python-boto-2.20.1/bin/route53 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/route53 2014-05-30 20:49:34.000000000 +0000 @@ -131,7 +131,7 @@ for old_value in response.resource_records: change1.add_value(old_value) - change2 = changes.add_change("CREATE", name, type, ttl, + change2 = changes.add_change("UPSERT", name, type, ttl, identifier=identifier, weight=weight) for new_value in newvalues.split(','): change2.add_value(new_value) @@ -148,11 +148,11 @@ continue if response.identifier != identifier or response.weight != weight: continue - change1 = changes.add_change("DELETE", name, type, + change1 = changes.add_change("DELETE", name, type, identifier=response.identifier, weight=response.weight) change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name) - change2 = changes.add_change("CREATE", name, type, identifier=identifier, weight=weight) + change2 = changes.add_change("UPSERT", name, type, identifier=identifier, weight=weight) change2.set_alias(new_alias_hosted_zone_id, 
new_alias_dns_name) print changes.commit() diff -Nru python-boto-2.20.1/bin/s3put python-boto-2.29.1/bin/s3put --- python-boto-2.20.1/bin/s3put 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/s3put 2014-05-30 20:49:34.000000000 +0000 @@ -173,6 +173,10 @@ _upload() +def check_valid_region(conn, region): + if conn is None: + print 'Invalid region (%s)' % region + sys.exit(1) def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname, reduced, debug, cb, num_cb, acl='private', headers={}, @@ -183,6 +187,7 @@ """ conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key, aws_secret_access_key=aws_secret) + check_valid_region(conn, region) conn.debug = debug bucket = conn.get_bucket(bucketname) @@ -298,18 +303,18 @@ aws_secret_access_key = a if o in ('-r', '--reduced'): reduced = True - if o in ('--header'): + if o == '--header': (k, v) = a.split("=", 1) headers[k] = v - if o in ('--host'): + if o == '--host': host = a - if o in ('--multipart'): + if o == '--multipart': if multipart_capable: multipart_requested = True else: print "multipart upload requested but not capable" sys.exit(4) - if o in ('--region'): + if o == '--region': regions = boto.s3.regions() for region_info in regions: if region_info.name == a: @@ -334,6 +339,7 @@ connect_args['host'] = host c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args) + check_valid_region(c, region or DEFAULT_REGION) c.debug = debug b = c.get_bucket(bucket_name, validate=False) diff -Nru python-boto-2.20.1/bin/sdbadmin python-boto-2.29.1/bin/sdbadmin --- python-boto-2.20.1/bin/sdbadmin 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/bin/sdbadmin 2014-05-30 20:49:34.000000000 +0000 @@ -90,6 +90,11 @@ else: domain.from_xml(file) +def check_valid_region(conn, region): + if conn is None: + print 'Invalid region (%s)' % region + sys.exit(1) + def create_db(domain_name, region_name): """Create a new DB @@ -97,6 +102,7 @@ :type domain: str """ sdb = 
boto.sdb.connect_to_region(region_name) + check_valid_region(sdb, region_name) return sdb.create_domain(domain_name) if __name__ == "__main__": @@ -125,6 +131,7 @@ exit() sdb = boto.sdb.connect_to_region(options.region_name) + check_valid_region(sdb, options.region_name) if options.list: for db in sdb.get_all_domains(): print db diff -Nru python-boto-2.20.1/boto/auth.py python-boto-2.29.1/boto/auth.py --- python-boto-2.20.1/boto/auth.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/auth.py 2014-05-30 20:49:34.000000000 +0000 @@ -36,38 +36,19 @@ import datetime from email.utils import formatdate import hmac +import os import sys import time import urllib +import urlparse import posixpath from boto.auth_handler import AuthHandler from boto.exception import BotoClientError -# -# the following is necessary because of the incompatibilities -# between Python 2.4, 2.5, and 2.6 as well as the fact that some -# people running 2.4 have installed hashlib as a separate module -# this fix was provided by boto user mccormix. -# see: http://code.google.com/p/boto/issues/detail?id=172 -# for more details. -# + try: from hashlib import sha1 as sha from hashlib import sha256 as sha256 - - if sys.version[:3] == "2.4": - # we are using an hmac that expects a .new() method. - class Faker: - def __init__(self, which): - self.which = which - self.digest_size = self.which().digest_size - - def new(self, *args, **kwargs): - return self.which(*args, **kwargs) - - sha = Faker(sha) - sha256 = Faker(sha256) - except ImportError: import sha sha256 = None @@ -129,7 +110,7 @@ capability = ['anon'] def __init__(self, host, config, provider): - AuthHandler.__init__(self, host, config, provider) + super(AnonAuthHandler, self).__init__(host, config, provider) def add_auth(self, http_request, **kwargs): pass @@ -240,7 +221,6 @@ Select the headers from the request that need to be included in the StringToSign. 
""" - headers_to_sign = {} headers_to_sign = {'Host': self.host} for name, value in http_request.headers.items(): lname = name.lower() @@ -349,7 +329,7 @@ parameter_names = sorted(http_request.params.keys()) pairs = [] for pname in parameter_names: - pval = str(http_request.params[pname]).encode('utf-8') + pval = boto.utils.get_utf8_value(http_request.params[pname]) pairs.append(urllib.quote(pname, safe='') + '=' + urllib.quote(pval, safe='-_~')) return '&'.join(pairs) @@ -361,7 +341,7 @@ return "" l = [] for param in sorted(http_request.params): - value = str(http_request.params[param]) + value = boto.utils.get_utf8_value(http_request.params[param]) l.append('%s=%s' % (urllib.quote(param, safe='-_.~'), urllib.quote(value, safe='-_.~'))) return '&'.join(l) @@ -373,10 +353,17 @@ case, sorting them in alphabetical order and then joining them into a string, separated by newlines. """ - l = sorted(['%s:%s' % (n.lower().strip(), - ' '.join(headers_to_sign[n].strip().split())) - for n in headers_to_sign]) - return '\n'.join(l) + canonical = [] + + for header in headers_to_sign: + c_name = header.lower().strip() + raw_value = headers_to_sign[header] + if '"' in raw_value: + c_value = raw_value.strip() + else: + c_value = ' '.join(raw_value.strip().split()) + canonical.append('%s:%s' % (c_name, c_value)) + return '\n'.join(sorted(canonical)) def signed_headers(self, headers_to_sign): l = ['%s' % n.lower().strip() for n in headers_to_sign] @@ -421,14 +408,11 @@ scope.append('aws4_request') return '/'.join(scope) - def credential_scope(self, http_request): - scope = [] - http_request.timestamp = http_request.headers['X-Amz-Date'][0:8] - scope.append(http_request.timestamp) - # The service_name and region_name either come from: - # * The service_name/region_name attrs or (if these values are None) - # * parsed from the endpoint ..amazonaws.com. 
- parts = http_request.host.split('.') + def split_host_parts(self, host): + return host.split('.') + + def determine_region_name(self, host): + parts = self.split_host_parts(host) if self.region_name is not None: region_name = self.region_name elif len(parts) > 1: @@ -442,11 +426,25 @@ else: region_name = parts[0] + return region_name + + def determine_service_name(self, host): + parts = self.split_host_parts(host) if self.service_name is not None: service_name = self.service_name else: service_name = parts[0] + return service_name + def credential_scope(self, http_request): + scope = [] + http_request.timestamp = http_request.headers['X-Amz-Date'][0:8] + scope.append(http_request.timestamp) + # The service_name and region_name either come from: + # * The service_name/region_name attrs or (if these values are None) + # * parsed from the endpoint ..amazonaws.com. + region_name = self.determine_region_name(http_request.host) + service_name = self.determine_service_name(http_request.host) http_request.service_name = service_name http_request.region_name = region_name @@ -502,7 +500,10 @@ # Safe to modify req.path here since # the signature will use req.auth_path. req.path = req.path.split('?')[0] - req.path = req.path + '?' + qs + + if qs: + # Don't insert the '?' unless there's actually a query string + req.path = req.path + '?' + qs canonical_request = self.canonical_request(req) boto.log.debug('CanonicalRequest:\n%s' % canonical_request) string_to_sign = self.string_to_sign(req, canonical_request) @@ -516,6 +517,156 @@ req.headers['Authorization'] = ','.join(l) +class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler): + """ + Implements a variant of Version 4 HMAC authorization specific to S3. 
+ """ + capability = ['hmac-v4-s3'] + + def __init__(self, *args, **kwargs): + super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs) + + if self.region_name: + self.region_name = self.clean_region_name(self.region_name) + + def clean_region_name(self, region_name): + if region_name.startswith('s3-'): + return region_name[3:] + + return region_name + + def canonical_uri(self, http_request): + # S3 does **NOT** do path normalization that SigV4 typically does. + # Urlencode the path, **NOT** ``auth_path`` (because vhosting). + path = urlparse.urlparse(http_request.path) + # Because some quoting may have already been applied, let's back it out. + unquoted = urllib.unquote(path.path) + # Requote, this time addressing all characters. + encoded = urllib.quote(unquoted) + return encoded + + def host_header(self, host, http_request): + port = http_request.port + secure = http_request.protocol == 'https' + if ((port == 80 and not secure) or (port == 443 and secure)): + return http_request.host + return '%s:%s' % (http_request.host, port) + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. + """ + host_header_value = self.host_header(self.host, http_request) + headers_to_sign = {} + headers_to_sign = {'Host': host_header_value} + for name, value in http_request.headers.items(): + lname = name.lower() + # Hooray for the only difference! The main SigV4 signer only does + # ``Host`` + ``x-amz-*``. But S3 wants pretty much everything + # signed, except for authorization itself. + if not lname in ['authorization']: + headers_to_sign[name] = value + return headers_to_sign + + def determine_region_name(self, host): + # S3's different format(s) of representing region/service from the + # rest of AWS makes this hurt too. 
+ # + # Possible domain formats: + # - s3.amazonaws.com (Classic) + # - s3-us-west-2.amazonaws.com (Specific region) + # - bukkit.s3.amazonaws.com (Vhosted Classic) + # - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region) + # - s3.cn-north-1.amazonaws.com.cn - (Bejing region) + # - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Bejing region) + parts = self.split_host_parts(host) + + if self.region_name is not None: + region_name = self.region_name + else: + # Classic URLs - s3-us-west-2.amazonaws.com + if len(parts) == 3: + region_name = self.clean_region_name(parts[0]) + + # Special-case for Classic. + if region_name == 's3': + region_name = 'us-east-1' + else: + # Iterate over the parts in reverse order. + for offset, part in enumerate(reversed(parts)): + part = part.lower() + + # Look for the first thing starting with 's3'. + # Until there's a ``.s3`` TLD, we should be OK. :P + if part == 's3': + # If it's by itself, the region is the previous part. + region_name = parts[-offset] + break + elif part.startswith('s3-'): + region_name = self.clean_region_name(part) + break + + return region_name + + def determine_service_name(self, host): + # Should this signing mechanism ever be used for anything else, this + # will fail. Consider utilizing the logic from the parent class should + # you find yourself here. + return 's3' + + def mangle_path_and_params(self, req): + """ + Returns a copy of the request object with fixed ``auth_path/params`` + attributes from the original. + """ + modified_req = copy.copy(req) + + # Unlike the most other services, in S3, ``req.params`` isn't the only + # source of query string parameters. + # Because of the ``query_args``, we may already have a query string + # **ON** the ``path/auth_path``. + # Rip them apart, so the ``auth_path/params`` can be signed + # appropriately. 
+ parsed_path = urlparse.urlparse(modified_req.auth_path) + modified_req.auth_path = parsed_path.path + + if modified_req.params is None: + modified_req.params = {} + + raw_qs = parsed_path.query + existing_qs = urlparse.parse_qs( + raw_qs, + keep_blank_values=True + ) + + # ``parse_qs`` will return lists. Don't do that unless there's a real, + # live list provided. + for key, value in existing_qs.items(): + if isinstance(value, (list, tuple)): + if len(value) == 1: + existing_qs[key] = value[0] + + modified_req.params.update(existing_qs) + return modified_req + + def payload(self, http_request): + if http_request.headers.get('x-amz-content-sha256'): + return http_request.headers['x-amz-content-sha256'] + + return super(S3HmacAuthV4Handler, self).payload(http_request) + + def add_auth(self, req, **kwargs): + if not 'x-amz-content-sha256' in req.headers: + if '_sha256' in req.headers: + req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256') + else: + req.headers['x-amz-content-sha256'] = self.payload(req) + + req = self.mangle_path_and_params(req) + return super(S3HmacAuthV4Handler, self).add_auth(req, **kwargs) + + class QueryAuthHandler(AuthHandler): """ Provides pure query construction (no actual signing). @@ -742,3 +893,42 @@ # user could override this with a .boto config that includes user-specific # credentials (for access to user data). return ready_handlers[-1] + + +def detect_potential_sigv4(func): + def _wrapper(self): + if os.environ.get('EC2_USE_SIGV4', False): + return ['hmac-v4'] + + if boto.config.get('ec2', 'use-sigv4', False): + return ['hmac-v4'] + + if hasattr(self, 'region'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. 
+ if getattr(self.region, 'endpoint', ''): + if '.cn-' in self.region.endpoint: + return ['hmac-v4'] + + return func(self) + return _wrapper + + +def detect_potential_s3sigv4(func): + def _wrapper(self): + if os.environ.get('S3_USE_SIGV4', False): + return ['hmac-v4-s3'] + + if boto.config.get('s3', 'use-sigv4', False): + return ['hmac-v4-s3'] + + if hasattr(self, 'host'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. + if '.cn-' in self.host: + return ['hmac-v4-s3'] + + return func(self) + return _wrapper diff -Nru python-boto-2.20.1/boto/beanstalk/__init__.py python-boto-2.29.1/boto/beanstalk/__init__.py --- python-boto-2.20.1/boto/beanstalk/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/beanstalk/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,31 +31,10 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.beanstalk.layer1 - return [RegionInfo(name='us-east-1', - endpoint='elasticbeanstalk.us-east-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='us-west-1', - endpoint='elasticbeanstalk.us-west-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='us-west-2', - endpoint='elasticbeanstalk.us-west-2.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-northeast-1', - endpoint='elasticbeanstalk.ap-northeast-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-southeast-1', - endpoint='elasticbeanstalk.ap-southeast-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-southeast-2', - 
endpoint='elasticbeanstalk.ap-southeast-2.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='eu-west-1', - endpoint='elasticbeanstalk.eu-west-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='sa-east-1', - endpoint='elasticbeanstalk.sa-east-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - ] + return get_regions( + 'elasticbeanstalk', + connection_cls=boto.beanstalk.layer1.Layer1 + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/beanstalk/layer1.py python-boto-2.29.1/boto/beanstalk/layer1.py --- python-boto-2.20.1/boto/beanstalk/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/beanstalk/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -40,18 +40,18 @@ proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - api_version=None, security_token=None): + api_version=None, security_token=None, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(Layer1, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, - security_token) + security_token, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -237,7 +237,8 @@ version_label=None, template_name=None, solution_stack_name=None, cname_prefix=None, description=None, option_settings=None, - options_to_remove=None): + options_to_remove=None, tier_name=None, + tier_type=None, tier_version='1.0'): """Launches an environment for the application using a configuration. :type application_name: string @@ -308,6 +309,25 @@ options to remove from the configuration set for this new environment. 
+ :type tier_name: string + :param tier_name: The name of the tier. Valid values are + "WebServer" and "Worker". Defaults to "WebServer". + The ``tier_name`` and a ``tier_type`` parameters are + related and the values provided must be valid. + The possible combinations are: + + * "WebServer" and "Standard" (the default) + * "Worker" and "SQS/HTTP" + + :type tier_type: string + :param tier_type: The type of the tier. Valid values are + "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP" + if ``tier_name`` is "Worker". Defaults to "Standard". + + :type tier_version: string + :type tier_version: The version of the tier. Valid values + currently are "1.0". Defaults to "1.0". + :raises: TooManyEnvironmentsException, InsufficientPrivilegesException """ @@ -330,6 +350,10 @@ if options_to_remove: self.build_list_params(params, options_to_remove, 'OptionsToRemove.member') + if tier_name and tier_type and tier_version: + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version return self._get_response('CreateEnvironment', params) def create_storage_location(self): @@ -848,9 +872,9 @@ return self._get_response('RetrieveEnvironmentInfo', params) def swap_environment_cnames(self, source_environment_id=None, - source_environment_name=None, - destination_environment_id=None, - destination_environment_name=None): + source_environment_name=None, + destination_environment_id=None, + destination_environment_name=None): """Swaps the CNAMEs of two environments. 
 :type source_environment_id: string @@ -1021,7 +1045,8 @@ def update_environment(self, environment_id=None, environment_name=None, version_label=None, template_name=None, description=None, option_settings=None, - options_to_remove=None): + options_to_remove=None, tier_name=None, + tier_type=None, tier_version='1.0'): """ Updates the environment description, deploys a new application version, updates the configuration settings to an entirely new @@ -1073,6 +1098,25 @@ :param options_to_remove: A list of custom user-defined configuration options to remove from the configuration set for this environment. + :type tier_name: string + :param tier_name: The name of the tier. Valid values are + "WebServer" and "Worker". Defaults to "WebServer". + The ``tier_name`` and a ``tier_type`` parameters are + related and the values provided must be valid. + The possible combinations are: + + * "WebServer" and "Standard" (the default) + * "Worker" and "SQS/HTTP" + + :type tier_type: string + :param tier_type: The type of the tier. Valid values are + "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP" + if ``tier_name`` is "Worker". Defaults to "Standard". + + :type tier_version: string + :param tier_version: The version of the tier. Valid values + currently are "1.0". Defaults to "1.0". 
+ :raises: InsufficientPrivilegesException """ params = {} @@ -1093,6 +1137,10 @@ if options_to_remove: self.build_list_params(params, options_to_remove, 'OptionsToRemove.member') + if tier_name and tier_type and tier_version: + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version return self._get_response('UpdateEnvironment', params) def validate_configuration_settings(self, application_name, diff -Nru python-boto-2.20.1/boto/cloudformation/connection.py python-boto-2.29.1/boto/cloudformation/connection.py --- python-boto-2.20.1/boto/cloudformation/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudformation/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -29,9 +30,28 @@ class CloudFormationConnection(AWSQueryConnection): - """ - A Connection to the CloudFormation Service. + AWS CloudFormation + AWS CloudFormation enables you to create and manage AWS + infrastructure deployments predictably and repeatedly. AWS + CloudFormation helps you leverage AWS products such as Amazon EC2, + EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, + highly scalable, cost effective applications without worrying + about creating and configuring the underlying AWS infrastructure. + + With AWS CloudFormation, you declare all of your resources and + dependencies in a template file. The template defines a collection + of resources as a single unit called a stack. AWS CloudFormation + creates and deletes all member resources of the stack together and + manages all dependencies between the resources for you. + + For more information about this product, go to the `CloudFormation + Product Page`_. 
+ + Amazon CloudFormation makes use of other AWS products. If you need + additional technical information about a specific AWS product, you + can find the product's technical documentation at + `http://aws.amazon.com/documentation/`_. """ APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15') DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1') @@ -52,19 +72,21 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - converter=None, security_token=None, validate_certs=True): + converter=None, security_token=None, validate_certs=True, + profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, CloudFormationConnection) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(CloudFormationConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -74,50 +96,117 @@ return {True: "true", False: "false"}[v] def _build_create_or_update_params(self, stack_name, template_body, - template_url, parameters, - notification_arns, disable_rollback, - timeout_in_minutes, capabilities, tags): + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, on_failure, stack_policy_body, + stack_policy_url, tags, stack_policy_during_update_body=None, + stack_policy_during_update_url=None): """ Helper that creates JSON parameters needed by a Stack Create or Stack Update call. 
:type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. :type template_body: string - :param template_body: The template body (JSON string) + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence - - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. - - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. - - :type disable_rollback: bool - :param disable_rollback: Indicates whether or not to rollback on - failure. - - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state. + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. 
+ + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). :type capabilities: list - :param capabilities: The list of capabilities you want to allow in - the stack. Currently, the only valid capability is - 'CAPABILITY_IAM'. - - :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. The + following resources require you to specify the capabilities + parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_, + `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_, + `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. + + :type on_failure: string + :param on_failure: Determines what action will be taken if stack + creation fails. This must be one of: DO_NOTHING, ROLLBACK, or + DELETE. You can specify either `OnFailure` or `DisableRollback`, + but not both. + Default: `ROLLBACK` + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. 
+ (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. If you pass + `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is + used. + + :type tags: list + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + + :type stack_policy_during_update_body: string + :param stack_policy_during_update_body: Structure containing the + temporary overriding stack policy body. If you pass + `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`, + only `StackPolicyDuringUpdateBody` is used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that associated with the stack + will be used. + + :type stack_policy_during_update_url: string + :param stack_policy_during_update_url: Location of a file containing + the temporary overriding stack policy. The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. :rtype: dict :return: JSON parameters represented as a Python dict. 
@@ -131,7 +220,7 @@ if template_body and template_url: boto.log.warning("If both TemplateBody and TemplateURL are" " specified, only TemplateBody will be honored by the API") - if len(parameters) > 0: + if parameters and len(parameters) > 0: for i, (key, value) in enumerate(parameters): params['Parameters.member.%d.ParameterKey' % (i + 1)] = key params['Parameters.member.%d.ParameterValue' % (i + 1)] = value @@ -142,107 +231,224 @@ for i, (key, value) in enumerate(tags.items()): params['Tags.member.%d.Key' % (i + 1)] = key params['Tags.member.%d.Value' % (i + 1)] = value - if len(notification_arns) > 0: + if notification_arns and len(notification_arns) > 0: self.build_list_params(params, notification_arns, "NotificationARNs.member") if timeout_in_minutes: params['TimeoutInMinutes'] = int(timeout_in_minutes) + if disable_rollback is not None: + params['DisableRollback'] = str( + disable_rollback).lower() + if on_failure is not None: + params['OnFailure'] = on_failure + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + if stack_policy_during_update_body is not None: + params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body + if stack_policy_during_update_url is not None: + params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url return params - def create_stack(self, stack_name, template_body=None, template_url=None, - parameters=[], notification_arns=[], disable_rollback=False, - timeout_in_minutes=None, capabilities=None, tags=None): + def _do_request(self, call, params, path, method): """ - Creates a CloudFormation Stack as specified by the template. + Do a request via ``self.make_request`` and parse the JSON response. 
- :type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks - - :type template_body: string - :param template_body: The template body (JSON string) - - :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence - - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. - - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. - - :type disable_rollback: bool - :param disable_rollback: Indicates whether or not to rollback on - failure. + :type call: string + :param call: Call name, e.g. ``CreateStack`` - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state. + :type params: dict + :param params: Dictionary of call parameters - :type capabilities: list - :param capabilities: The list of capabilities you want to allow in - the stack. Currently, the only valid capability is - 'CAPABILITY_IAM'. + :type path: string + :param path: Server path - :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. + :type method: string + :param method: HTTP method to use - :rtype: string - :return: The unique Stack ID. 
+ :rtype: dict + :return: Parsed JSON response data """ - params = self._build_create_or_update_params(stack_name, - template_body, template_url, parameters, notification_arns, - disable_rollback, timeout_in_minutes, capabilities, tags) - response = self.make_request('CreateStack', params, '/', 'POST') + response = self.make_request(call, params, path, method) body = response.read() if response.status == 200: body = json.loads(body) - return body['CreateStackResponse']['CreateStackResult']['StackId'] + return body else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise self.ResponseError(response.status, response.reason, body=body) + + def create_stack(self, stack_name, template_body=None, template_url=None, + parameters=None, notification_arns=None, disable_rollback=None, + timeout_in_minutes=None, capabilities=None, tags=None, + on_failure=None, stack_policy_body=None, stack_policy_url=None): + """ + Creates a stack as specified in the template. After the call + completes successfully, the stack creation starts. You can + check the status of the stack via the DescribeStacks API. + Currently, the limit for stacks is 20 stacks per account per + region. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. 
The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. + + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). + + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. The + following resources require you to specify the capabilities + parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_, + `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_, + `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. + + :type on_failure: string + :param on_failure: Determines what action will be taken if stack + creation fails. This must be one of: DO_NOTHING, ROLLBACK, or + DELETE. You can specify either `OnFailure` or `DisableRollback`, + but not both. 
+ Default: `ROLLBACK` + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. If you pass + `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is + used. + + :type tags: dict + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + """ + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, on_failure, stack_policy_body, + stack_policy_url, tags) + body = self._do_request('CreateStack', params, '/', 'POST') + return body['CreateStackResponse']['CreateStackResult']['StackId'] def update_stack(self, stack_name, template_body=None, template_url=None, - parameters=[], notification_arns=[], disable_rollback=False, - timeout_in_minutes=None, capabilities=None, tags=None): + parameters=None, notification_arns=None, disable_rollback=False, + timeout_in_minutes=None, capabilities=None, tags=None, + stack_policy_during_update_body=None, + stack_policy_during_update_url=None, + stack_policy_body=None, stack_policy_url=None): """ - Updates a CloudFormation Stack as specified by the template. + Updates a stack as specified in the template. After the call + completes successfully, the stack update starts. You can check + the status of the stack via the DescribeStacks action. 
+ + + + **Note: **You cannot update `AWS::S3::Bucket`_ resources, for + example, to add or modify tags. + + + + To get a copy of the template for an existing stack, you can + use the GetTemplate action. + + Tags that were associated with this stack during creation time + will still be associated with the stack after an `UpdateStack` + operation. + + For more information about creating an update template, + updating a stack, and monitoring the progress of the update, + see `Updating a Stack`_. :type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks. + :param stack_name: + The name or stack ID of the stack to update. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. :type template_body: string - :param template_body: The template body (JSON string) + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence. - - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. - - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. 
+ + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). :type disable_rollback: bool :param disable_rollback: Indicates whether or not to rollback on failure. - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. :type capabilities: list :param capabilities: The list of capabilities you want to allow in @@ -250,38 +456,86 @@ 'CAPABILITY_IAM'. :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type stack_policy_during_update_body: string + :param stack_policy_during_update_body: Structure containing the + temporary overriding stack policy body. 
If you pass + `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`, + only `StackPolicyDuringUpdateBody` is used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that associated with the stack + will be used. + + :type stack_policy_during_update_url: string + :param stack_policy_during_update_url: Location of a file containing + the temporary overriding stack policy. The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. :rtype: string :return: The unique Stack ID. """ - params = self._build_create_or_update_params(stack_name, - template_body, template_url, parameters, notification_arns, - disable_rollback, timeout_in_minutes, capabilities, tags) - response = self.make_request('UpdateStack', params, '/', 'POST') - body = response.read() - if response.status == 200: - body = json.loads(body) - return body['UpdateStackResponse']['UpdateStackResult']['StackId'] - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, None, stack_policy_body, + stack_policy_url, tags, stack_policy_during_update_body, + stack_policy_during_update_url) + body = self._do_request('UpdateStack', params, '/', 'POST') + return body['UpdateStackResponse']['UpdateStackResult']['StackId'] def 
delete_stack(self, stack_name_or_id): + """ + Deletes a specified stack. Once the call completes + successfully, stack deletion starts. Deleted stacks do not + show up in the DescribeStacks API if the deletion has been + completed successfully. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id} - # TODO: change this to get_status ? - response = self.make_request('DeleteStack', params, '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('DeleteStack', params, '/', 'GET') def describe_stack_events(self, stack_name_or_id=None, next_token=None): + """ + Returns all stack related events for a specified stack. For + more information about a stack's event history, go to + `Stacks`_ in the AWS CloudFormation User Guide. + Events are returned, even if the stack never existed or has + been successfully deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + events, if there is one. + Default: There is no default value. + + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id @@ -291,21 +545,82 @@ StackEvent)]) def describe_stack_resource(self, stack_name_or_id, logical_resource_id): + """ + Returns a description of the specified resource in the + specified stack. + + For deleted stacks, DescribeStackResource returns resource + information for up to 90 days after the stack has been + deleted. 
+ + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id, 'LogicalResourceId': logical_resource_id} - response = self.make_request('DescribeStackResource', params, - '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('DescribeStackResource', params, '/', 'GET') def describe_stack_resources(self, stack_name_or_id=None, logical_resource_id=None, physical_resource_id=None): + """ + Returns AWS resource descriptions for running and deleted + stacks. If `StackName` is specified, all the associated + resources that are part of the stack are returned. If + `PhysicalResourceId` is specified, the associated resources of + the stack that the resource belongs to are returned. + Only the first 100 resources will be returned. If your stack + has more resources than this, you should use + `ListStackResources` instead. + For deleted stacks, `DescribeStackResources` returns resource + information for up to 90 days after the stack has been + deleted. + + You must specify either `StackName` or `PhysicalResourceId`, + but not both. In addition, you can specify `LogicalResourceId` + to filter the returned result. For more information about + resources, the `LogicalResourceId` and `PhysicalResourceId`, + go to the `AWS CloudFormation User Guide`_. + A `ValidationError` is returned if you specify both + `StackName` and `PhysicalResourceId` in the same request. 
+ + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Required: Conditional. If you do not specify `StackName`, you must + specify `PhysicalResourceId`. + + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + :type physical_resource_id: string + :param physical_resource_id: The name or unique identifier that + corresponds to a physical instance ID of a resource supported by + AWS CloudFormation. + For example, for an Amazon Elastic Compute Cloud (EC2) instance, + `PhysicalResourceId` corresponds to the `InstanceId`. You can pass + the EC2 `InstanceId` to `DescribeStackResources` to find which + stack the instance belongs to and what other resources are part of + the stack. + + Required: Conditional. If you do not specify `PhysicalResourceId`, you + must specify `StackName`. + + Default: There is no default value. + + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id @@ -316,35 +631,110 @@ return self.get_list('DescribeStackResources', params, [('member', StackResource)]) - def describe_stacks(self, stack_name_or_id=None): + def describe_stacks(self, stack_name_or_id=None, next_token=None): + """ + Returns the description for the specified stack; if no stack + name was specified, then it returns the description for all + the stacks created. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. 
+ + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id + if next_token is not None: + params['NextToken'] = next_token return self.get_list('DescribeStacks', params, [('member', Stack)]) def get_template(self, stack_name_or_id): + """ + Returns the template body for a specified stack. You can get + the template for running or deleted stacks. + + For deleted stacks, GetTemplate returns the template for up to + 90 days after the stack has been deleted. + If the template does not exist, a `ValidationError` is + returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id} - response = self.make_request('GetTemplate', params, '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('GetTemplate', params, '/', 'GET') def list_stack_resources(self, stack_name_or_id, next_token=None): + """ + Returns descriptions of all resources of the specified stack. + + For deleted stacks, ListStackResources returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. 
+ + :type next_token: string + :param next_token: String that identifies the start of the next list of + stack resource summaries, if there is one. + Default: There is no default value. + + """ params = {'StackName': stack_name_or_id} if next_token: params['NextToken'] = next_token return self.get_list('ListStackResources', params, [('member', StackResourceSummary)]) - def list_stacks(self, stack_status_filters=[], next_token=None): + def list_stacks(self, stack_status_filters=None, next_token=None): + """ + Returns the summary information for stacks whose status + matches the specified StackStatusFilter. Summary information + for stacks that have been deleted is kept for 90 days after + the stack is deleted. If no StackStatusFilter is specified, + summary information for all stacks is returned (including + existing stacks and stacks that have been deleted). + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + Default: There is no default value. + + :type stack_status_filter: list + :param stack_status_filter: Stack status to use as a filter. Specify + one or more stack status codes to list only stacks with the + specified status codes. For a complete list of stack status codes, + see the `StackStatus` parameter of the Stack data type. + + """ params = {} if next_token: params['NextToken'] = next_token - if len(stack_status_filters) > 0: + if stack_status_filters and len(stack_status_filters) > 0: self.build_list_params(params, stack_status_filters, "StackStatusFilter.member") @@ -352,6 +742,25 @@ [('member', StackSummary)]) def validate_template(self, template_body=None, template_url=None): + """ + Validates a specified template. + + :type template_body: string + :param template_body: String containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateURL` or `TemplateBody`. 
If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + """ params = {} if template_body: params['TemplateBody'] = template_body @@ -364,7 +773,116 @@ verb="POST") def cancel_update_stack(self, stack_name_or_id=None): + """ + Cancels an update on the specified stack. If the call + completes successfully, the stack will roll back the update + and revert to the previous stack configuration. + Only stacks that are in the UPDATE_IN_PROGRESS state can be + canceled. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated with + the stack. + + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id return self.get_status('CancelUpdateStack', params) + + def estimate_template_cost(self, template_body=None, template_url=None, + parameters=None): + """ + Returns the estimated monthly cost of a template. The return + value is an AWS Simple Monthly Calculator URL with a query + string that describes the resources required to run the + template. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. 
For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the template. + + :rtype: string + :returns: URL to pre-filled cost calculator + """ + params = {'ContentType': "JSON"} + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if parameters and len(parameters) > 0: + for i, (key, value) in enumerate(parameters): + params['Parameters.member.%d.ParameterKey' % (i + 1)] = key + params['Parameters.member.%d.ParameterValue' % (i + 1)] = value + + response = self._do_request('EstimateTemplateCost', params, '/', 'POST') + return response['EstimateTemplateCostResponse']\ + ['EstimateTemplateCostResult']\ + ['Url'] + + def get_stack_policy(self, stack_name_or_id): + """ + Returns the stack policy for a specified stack. If a stack + doesn't have a policy, a null value is returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that is associated with + the stack whose policy you want to get. + + :rtype: string + :return: The policy JSON document + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + response = self._do_request('GetStackPolicy', params, '/', 'POST') + return response['GetStackPolicyResponse']\ + ['GetStackPolicyResult']\ + ['StackPolicyBody'] + + def set_stack_policy(self, stack_name_or_id, stack_policy_body=None, + stack_policy_url=None): + """ + Sets a stack policy for a specified stack. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that you want to + associate a policy with. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. 
+ (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + + response = self._do_request('SetStackPolicy', params, '/', 'POST') + return response['SetStackPolicyResponse']\ + ['SetStackPolicyResult'] diff -Nru python-boto-2.20.1/boto/cloudformation/__init__.py python-boto-2.29.1/boto/cloudformation/__init__.py --- python-boto-2.20.1/boto/cloudformation/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudformation/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,18 +21,9 @@ # IN THE SOFTWARE. 
from connection import CloudFormationConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions -RegionData = { - 'us-east-1': 'cloudformation.us-east-1.amazonaws.com', - 'us-west-1': 'cloudformation.us-west-1.amazonaws.com', - 'us-west-2': 'cloudformation.us-west-2.amazonaws.com', - 'sa-east-1': 'cloudformation.sa-east-1.amazonaws.com', - 'eu-west-1': 'cloudformation.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'cloudformation.ap-southeast-2.amazonaws.com', -} +RegionData = load_regions().get('cloudformation') def regions(): @@ -42,13 +33,10 @@ :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=CloudFormationConnection) - regions.append(region) - return regions + return get_regions( + 'cloudformation', + connection_cls=CloudFormationConnection + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/cloudformation/stack.py python-boto-2.29.1/boto/cloudformation/stack.py --- python-boto-2.20.1/boto/cloudformation/stack.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudformation/stack.py 2014-05-30 20:49:34.000000000 +0000 @@ -107,6 +107,35 @@ def get_template(self): return self.connection.get_template(stack_name_or_id=self.stack_id) + def get_policy(self): + """ + Returns the stack policy for this stack. If it has no policy + then, a null value is returned. + """ + return self.connection.get_stack_policy(self.stack_id) + + def set_policy(self, stack_policy_body=None, stack_policy_url=None): + """ + Sets a stack policy for this stack. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. 
+ (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. + """ + return self.connection.set_stack_policy(self.stack_id, + stack_policy_body=stack_policy_body, + stack_policy_url=stack_policy_url) + class StackSummary(object): def __init__(self, connection=None): diff -Nru python-boto-2.20.1/boto/cloudformation/template.py python-boto-2.29.1/boto/cloudformation/template.py --- python-boto-2.20.1/boto/cloudformation/template.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudformation/template.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,25 +1,33 @@ from boto.resultset import ResultSet +from boto.cloudformation.stack import Capability -class Template: +class Template(object): def __init__(self, connection=None): self.connection = connection self.description = None self.template_parameters = None + self.capabilities_reason = None + self.capabilities = None def startElement(self, name, attrs, connection): if name == "Parameters": self.template_parameters = ResultSet([('member', TemplateParameter)]) return self.template_parameters + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities else: return None def endElement(self, name, value, connection): if name == "Description": self.description = value + elif name == "CapabilitiesReason": + self.capabilities_reason = value else: setattr(self, name, value) -class TemplateParameter: +class TemplateParameter(object): def __init__(self, parent): self.parent = parent 
self.default_value = None diff -Nru python-boto-2.20.1/boto/cloudfront/distribution.py python-boto-2.29.1/boto/cloudfront/distribution.py --- python-boto-2.20.1/boto/cloudfront/distribution.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/distribution.py 2014-05-30 20:49:34.000000000 +0000 @@ -103,6 +103,9 @@ self.logging = logging self.default_root_object = default_root_object + def __repr__(self): + return "DistributionConfig:%s" % self.origin + def to_xml(self): s = '\n' s += '\n' @@ -176,7 +179,7 @@ def __init__(self, connection=None, origin='', enabled=False, caller_reference='', cnames=None, comment='', trusted_signers=None, logging=None): - DistributionConfig.__init__(self, connection=connection, + super(StreamingDistributionConfig, self).__init__(connection=connection, origin=origin, enabled=enabled, caller_reference=caller_reference, cnames=cnames, comment=comment, @@ -234,6 +237,9 @@ self.etag = None self.streaming = False + def __repr__(self): + return "DistributionSummary:%s" % self.domain_name + def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() @@ -295,6 +301,9 @@ self._bucket = None self._object_class = Object + def __repr__(self): + return "Distribution:%s" % self.domain_name + def startElement(self, name, attrs, connection): if name == 'DistributionConfig': self.config = DistributionConfig() @@ -350,11 +359,11 @@ self.config.cnames, self.config.comment, self.config.trusted_signers, self.config.default_root_object) - if enabled != None: + if enabled is not None: new_config.enabled = enabled - if cnames != None: + if cnames is not None: new_config.cnames = cnames - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) self.config = new_config @@ -684,8 +693,8 @@ def __init__(self, connection=None, config=None, domain_name='', id='', 
last_modified_time=None, status=''): - Distribution.__init__(self, connection, config, domain_name, - id, last_modified_time, status) + super(StreamingDistribution, self).__init__(connection, config, + domain_name, id, last_modified_time, status) self._object_class = StreamingObject def startElement(self, name, attrs, connection): @@ -693,7 +702,8 @@ self.config = StreamingDistributionConfig() return self.config else: - return Distribution.startElement(self, name, attrs, connection) + return super(StreamingDistribution, self).startElement(name, attrs, + connection) def update(self, enabled=None, cnames=None, comment=None): """ @@ -729,11 +739,11 @@ self.config.cnames, self.config.comment, self.config.trusted_signers) - if enabled != None: + if enabled is not None: new_config.enabled = enabled - if cnames != None: + if cnames is not None: new_config.cnames = cnames - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_streaming_distribution_config(self.id, self.etag, diff -Nru python-boto-2.20.1/boto/cloudfront/identity.py python-boto-2.29.1/boto/cloudfront/identity.py --- python-boto-2.20.1/boto/cloudfront/identity.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/identity.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,15 +14,14 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import uuid -class OriginAccessIdentity: - +class OriginAccessIdentity(object): def __init__(self, connection=None, config=None, id='', s3_user_id='', comment=''): self.connection = connection @@ -31,7 +30,7 @@ self.s3_user_id = s3_user_id self.comment = comment self.etag = None - + def startElement(self, name, attrs, connection): if name == 'CloudFrontOriginAccessIdentityConfig': self.config = OriginAccessIdentityConfig() @@ -53,7 +52,7 @@ new_config = OriginAccessIdentityConfig(self.connection, self.config.caller_reference, self.config.comment) - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config) self.config = new_config @@ -63,9 +62,9 @@ def uri(self): return 'origin-access-identity/cloudfront/%s' % self.id - -class OriginAccessIdentityConfig: + +class OriginAccessIdentityConfig(object): def __init__(self, connection=None, caller_reference='', comment=''): self.connection = connection if caller_reference: @@ -94,8 +93,8 @@ else: setattr(self, name, value) -class OriginAccessIdentitySummary: +class OriginAccessIdentitySummary(object): def __init__(self, connection=None, id='', s3_user_id='', comment=''): self.connection = connection @@ -103,7 +102,7 @@ self.s3_user_id = s3_user_id self.comment = comment self.etag = None - + def startElement(self, name, attrs, connection): return None @@ -119,4 +118,4 @@ def get_origin_access_identity(self): return self.connection.get_origin_access_identity_info(self.id) - + diff -Nru python-boto-2.20.1/boto/cloudfront/__init__.py python-boto-2.29.1/boto/cloudfront/__init__.py --- python-boto-2.20.1/boto/cloudfront/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -43,12 +43,14 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, port=None, proxy=None, proxy_port=None, host=DefaultHost, debug=0, 
security_token=None, - validate_certs=True): - AWSAuthConnection.__init__(self, host, + validate_certs=True, profile_name=None, https_connection_factory=None): + super(CloudFrontConnection, self).__init__(host, aws_access_key_id, aws_secret_access_key, True, port, proxy, proxy_port, debug=debug, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + https_connection_factory=https_connection_factory, + profile_name=profile_name) def get_etag(self, response): response_headers = response.msg diff -Nru python-boto-2.20.1/boto/cloudfront/invalidation.py python-boto-2.29.1/boto/cloudfront/invalidation.py --- python-boto-2.20.1/boto/cloudfront/invalidation.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/invalidation.py 2014-05-30 20:49:34.000000000 +0000 @@ -75,7 +75,7 @@ def to_xml(self): """Get this batch as XML""" - assert self.connection != None + assert self.connection is not None s = '\n' s += '\n' % self.connection.Version for p in self.paths: diff -Nru python-boto-2.20.1/boto/cloudfront/object.py python-boto-2.29.1/boto/cloudfront/object.py --- python-boto-2.20.1/boto/cloudfront/object.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/object.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -24,7 +24,7 @@ class Object(Key): def __init__(self, bucket, name=None): - Key.__init__(self, bucket, name=name) + super(Object, self).__init__(bucket, name=name) self.distribution = bucket.distribution def __repr__(self): @@ -43,6 +43,6 @@ class StreamingObject(Object): def url(self, scheme='rtmp'): - return Object.url(self, scheme) + return super(StreamingObject, self).url(scheme) + - diff -Nru python-boto-2.20.1/boto/cloudfront/signers.py python-boto-2.29.1/boto/cloudfront/signers.py --- python-boto-2.20.1/boto/cloudfront/signers.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudfront/signers.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,17 +14,16 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-class Signer: - +class Signer(object): def __init__(self): self.id = None self.key_pair_ids = [] - + def startElement(self, name, attrs, connection): return None @@ -35,9 +34,9 @@ self.id = value elif name == 'KeyPairId': self.key_pair_ids.append(value) - -class ActiveTrustedSigners(list): + +class ActiveTrustedSigners(list): def startElement(self, name, attrs, connection): if name == 'Signer': s = Signer() @@ -47,8 +46,8 @@ def endElement(self, name, value, connection): pass -class TrustedSigners(list): +class TrustedSigners(list): def startElement(self, name, attrs, connection): return None diff -Nru python-boto-2.20.1/boto/cloudsearch/__init__.py python-boto-2.29.1/boto/cloudsearch/__init__.py --- python-boto-2.20.1/boto/cloudsearch/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,23 +32,10 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.cloudsearch.layer1 - return [RegionInfo(name='us-east-1', - endpoint='cloudsearch.us-east-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='eu-west-1', - endpoint='cloudsearch.eu-west-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='us-west-1', - endpoint='cloudsearch.us-west-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='us-west-2', - endpoint='cloudsearch.us-west-2.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='ap-southeast-1', - endpoint='cloudsearch.ap-southeast-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - - ] + return get_regions( + 'cloudsearch', + connection_cls=boto.cloudsearch.layer1.Layer1 + ) def 
connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/cloudsearch/layer1.py python-boto-2.29.1/boto/cloudsearch/layer1.py --- python-boto-2.20.1/boto/cloudsearch/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -46,7 +46,7 @@ proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) @@ -66,7 +66,8 @@ https_connection_factory=https_connection_factory, path=path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -88,7 +89,7 @@ for p in doc_path: inner = inner.get(p) if not inner: - return None if list_marker == None else [] + return None if list_marker is None else [] if isinstance(inner, list): return inner else: diff -Nru python-boto-2.20.1/boto/cloudsearch2/document.py python-boto-2.29.1/boto/cloudsearch2/document.py --- python-boto-2.20.1/boto/cloudsearch2/document.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/document.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,261 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto.exception +from boto.compat import json +import requests +import boto + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + pass + + +class EncodingError(Exception): + """ + Content sent for Cloud Search indexing was incorrectly encoded. + + This usually happens when a document is marked as unicode but non-unicode + characters are present. + """ + pass + + +class ContentTooLongError(Exception): + """ + Content sent for Cloud Search indexing was too long + + This will usually happen when documents queued for indexing add up to more + than the limit allowed per upload batch (5MB) + + """ + pass + + +class DocumentServiceConnection(object): + """ + A CloudSearch document service. + + The DocumentServiceConection is used to add, remove and update documents in + CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document + Format). 
+ + To generate an appropriate SDF, use :func:`add` to add or update documents, + as well as :func:`delete` to remove documents. + + Once the set of documents is ready to be index, use :func:`commit` to send + the commands to CloudSearch. + + If there are a lot of documents to index, it may be preferable to split the + generation of SDF data and the actual uploading into CloudSearch. Retrieve + the current SDF with :func:`get_sdf`. If this file is the uploaded into S3, + it can be retrieved back afterwards for upload into CloudSearch using + :func:`add_sdf_from_s3`. + + The SDF is not cleared after a :func:`commit`. If you wish to continue + using the DocumentServiceConnection for another batch upload of commands, + you will need to :func:`clear_sdf` first to stop the previous batch of + commands from being uploaded again. + + """ + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + if not self.endpoint: + self.endpoint = domain.doc_service_endpoint + self.documents_batch = [] + self._sdf = None + + def add(self, _id, fields): + """ + Add a document to be processed by the DocumentService + + The document will not actually be added until :func:`commit` is called + + :type _id: string + :param _id: A unique ID used to refer to this document. + + :type fields: dict + :param fields: A dictionary of key-value pairs to be uploaded . + """ + + d = {'type': 'add', 'id': _id, 'fields': fields} + self.documents_batch.append(d) + + def delete(self, _id): + """ + Schedule a document to be removed from the CloudSearch service + + The document will not actually be scheduled for removal until + :func:`commit` is called + + :type _id: string + :param _id: The unique ID of this document. 
+ """ + + d = {'type': 'delete', 'id': _id} + self.documents_batch.append(d) + + def get_sdf(self): + """ + Generate the working set of documents in Search Data Format (SDF) + + :rtype: string + :returns: JSON-formatted string of the documents in SDF + """ + + return self._sdf if self._sdf else json.dumps(self.documents_batch) + + def clear_sdf(self): + """ + Clear the working documents from this DocumentServiceConnection + + This should be used after :func:`commit` if the connection will be + reused for another set of documents. + """ + + self._sdf = None + self.documents_batch = [] + + def add_sdf_from_s3(self, key_obj): + """ + Load an SDF from S3 + + Using this method will result in documents added through + :func:`add` and :func:`delete` being ignored. + + :type key_obj: :class:`boto.s3.key.Key` + :param key_obj: An S3 key which contains an SDF + """ + #@todo:: (lucas) would be nice if this could just take an s3://uri..." + + self._sdf = key_obj.get_contents_as_string() + + def commit(self): + """ + Actually send an SDF to CloudSearch for processing + + If an SDF file has been explicitly loaded it will be used. Otherwise, + documents added through :func:`add` and :func:`delete` will be used. + + :rtype: :class:`CommitResponse` + :returns: A summary of documents added and deleted + """ + + sdf = self.get_sdf() + + if ': null' in sdf: + boto.log.error('null value in sdf detected. This will probably ' + 'raise 500 error.') + index = sdf.index(': null') + boto.log.error(sdf[index - 100:index + 100]) + + api_version = '2013-01-01' + if self.domain: + api_version = self.domain.layer1.APIVersion + url = "http://%s/%s/documents/batch" % (self.endpoint, api_version) + + # Keep-alive is automatic in a post-1.0 requests world. 
+ session = requests.Session() + adapter = requests.adapters.HTTPAdapter( + pool_connections=20, + pool_maxsize=50, + max_retries=5 + ) + session.mount('http://', adapter) + session.mount('https://', adapter) + r = session.post(url, data=sdf, + headers={'Content-Type': 'application/json'}) + + return CommitResponse(r, self, sdf) + + +class CommitResponse(object): + """Wrapper for response to Cloudsearch document batch commit. + + :type response: :class:`requests.models.Response` + :param response: Response from Cloudsearch /documents/batch API + + :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection` + :param doc_service: Object containing the documents posted and methods to + retry + + :raises: :class:`boto.exception.BotoServerError` + :raises: :class:`boto.cloudsearch2.document.SearchServiceException` + :raises: :class:`boto.cloudsearch2.document.EncodingError` + :raises: :class:`boto.cloudsearch2.document.ContentTooLongError` + """ + def __init__(self, response, doc_service, sdf): + self.response = response + self.doc_service = doc_service + self.sdf = sdf + + try: + self.content = json.loads(response.content) + except: + boto.log.error('Error indexing documents.\nResponse Content:\n{0}' + '\n\nSDF:\n{1}'.format(response.content, self.sdf)) + raise boto.exception.BotoServerError(self.response.status_code, '', + body=response.content) + + self.status = self.content['status'] + if self.status == 'error': + self.errors = [e.get('message') for e in self.content.get('errors', + [])] + for e in self.errors: + if "Illegal Unicode character" in e: + raise EncodingError("Illegal Unicode character in document") + elif e == "The Content-Length is too long": + raise ContentTooLongError("Content was too long") + else: + self.errors = [] + + self.adds = self.content['adds'] + self.deletes = self.content['deletes'] + self._check_num_ops('add', self.adds) + self._check_num_ops('delete', self.deletes) + + def _check_num_ops(self, type_, response_num): 
+ """Raise exception if number of ops in response doesn't match commit + + :type type_: str + :param type_: Type of commit operation: 'add' or 'delete' + + :type response_num: int + :param response_num: Number of adds or deletes in the response. + + :raises: :class:`boto.cloudsearch2.document.CommitMismatchError` + """ + commit_num = len([d for d in self.doc_service.documents_batch + if d['type'] == type_]) + + if response_num != commit_num: + boto.log.debug(self.response.content) + raise CommitMismatchError( + 'Incorrect number of {0}s returned. Commit: {1} Response: {2}' + .format(type_, commit_num, response_num)) diff -Nru python-boto-2.20.1/boto/cloudsearch2/domain.py python-boto-2.29.1/boto/cloudsearch2/domain.py --- python-boto-2.20.1/boto/cloudsearch2/domain.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/domain.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,542 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from .optionstatus import IndexFieldStatus +from .optionstatus import ServicePoliciesStatus +from .optionstatus import ExpressionStatus +from .optionstatus import AvailabilityOptionsStatus +from .optionstatus import ScalingParametersStatus +from .document import DocumentServiceConnection +from .search import SearchConnection + + +def handle_bool(value): + if value in [True, 'true', 'True', 'TRUE', 1]: + return True + return False + + +class Domain(object): + """ + A Cloudsearch domain. + + :ivar name: The name of the domain. + + :ivar id: The internally generated unique identifier for the domain. + + :ivar created: A boolean which is True if the domain is + created. It can take several minutes to initialize a domain + when CreateDomain is called. Newly created search domains are + returned with a False value for Created until domain creation + is complete + + :ivar deleted: A boolean which is True if the search domain has + been deleted. The system must clean up resources dedicated to + the search domain when delete is called. Newly deleted + search domains are returned from list_domains with a True + value for deleted for several minutes until resource cleanup + is complete. + + :ivar processing: True if processing is being done to activate the + current domain configuration. + + :ivar num_searchable_docs: The number of documents that have been + submittted to the domain and indexed. + + :ivar requires_index_document: True if index_documents needs to be + called to activate the current domain configuration. + + :ivar search_instance_count: The number of search instances that are + available to process search requests. 
+ + :ivar search_instance_type: The instance type that is being used to + process search requests. + + :ivar search_partition_count: The number of partitions across which + the search index is spread. + """ + + def __init__(self, layer1, data): + """ + Constructor - Create a domain object from a layer1 and data params + + :type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object + :param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object + which is used to perform operations on the domain. + """ + self.layer1 = layer1 + self.update_from_data(data) + + def update_from_data(self, data): + self.created = data['Created'] + self.deleted = data['Deleted'] + self.processing = data['Processing'] + self.requires_index_documents = data['RequiresIndexDocuments'] + self.domain_id = data['DomainId'] + self.domain_name = data['DomainName'] + self.search_instance_count = data['SearchInstanceCount'] + self.search_instance_type = data.get('SearchInstanceType', None) + self.search_partition_count = data['SearchPartitionCount'] + self._doc_service = data['DocService'] + self._service_arn = data['ARN'] + self._search_service = data['SearchService'] + + @property + def service_arn(self): + return self._service_arn + + @property + def doc_service_endpoint(self): + return self._doc_service['Endpoint'] + + @property + def search_service_endpoint(self): + return self._search_service['Endpoint'] + + @property + def created(self): + return self._created + + @created.setter + def created(self, value): + self._created = handle_bool(value) + + @property + def deleted(self): + return self._deleted + + @deleted.setter + def deleted(self, value): + self._deleted = handle_bool(value) + + @property + def processing(self): + return self._processing + + @processing.setter + def processing(self, value): + self._processing = handle_bool(value) + + @property + def requires_index_documents(self): + return self._requires_index_documents + + @requires_index_documents.setter + def 
requires_index_documents(self, value): + self._requires_index_documents = handle_bool(value) + + @property + def search_partition_count(self): + return self._search_partition_count + + @search_partition_count.setter + def search_partition_count(self, value): + self._search_partition_count = int(value) + + @property + def search_instance_count(self): + return self._search_instance_count + + @search_instance_count.setter + def search_instance_count(self, value): + self._search_instance_count = int(value) + + @property + def name(self): + return self.domain_name + + @property + def id(self): + return self.domain_id + + def delete(self): + """ + Delete this domain and all index data associated with it. + """ + return self.layer1.delete_domain(self.name) + + def get_analysis_schemes(self): + """ + Return a list of Analysis Scheme objects. + """ + return self.layer1.describe_analysis_schemes(self.name) + + def get_availability_options(self): + """ + Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object representing the currently defined availability options for + the domain. + :return: OptionsStatus object + :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object + """ + return AvailabilityOptionsStatus( + self, refresh_fn=self.layer1.describe_availability_options, + refresh_key=['DescribeAvailabilityOptionsResponse', + 'DescribeAvailabilityOptionsResult', + 'AvailabilityOptions'], + save_fn=self.layer1.update_availability_options) + + def get_scaling_options(self): + """ + Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object representing the currently defined scaling options for the + domain. 
+ :return: ScalingParametersStatus object + :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object + """ + return ScalingParametersStatus( + self, refresh_fn=self.layer1.describe_scaling_parameters, + refresh_key=['DescribeScalingParametersResponse', + 'DescribeScalingParametersResult', + 'ScalingParameters'], + save_fn=self.layer1.update_scaling_parameters) + + def get_access_policies(self): + """ + Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus` + object representing the currently defined access policies for the + domain. + :return: ServicePoliciesStatus object + :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object + """ + return ServicePoliciesStatus( + self, refresh_fn=self.layer1.describe_service_access_policies, + refresh_key=['DescribeServiceAccessPoliciesResponse', + 'DescribeServiceAccessPoliciesResult', + 'AccessPolicies'], + save_fn=self.layer1.update_service_access_policies) + + def index_documents(self): + """ + Tells the search domain to start indexing its documents using + the latest text processing options and IndexFields. This + operation must be invoked to make options whose OptionStatus + has OptionState of RequiresIndexDocuments visible in search + results. + """ + self.layer1.index_documents(self.name) + + def get_index_fields(self, field_names=None): + """ + Return a list of index fields defined for this domain. 
+ :return: list of IndexFieldStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` + object + """ + data = self.layer1.describe_index_fields(self.name, field_names) + + data = (data['DescribeIndexFieldsResponse'] + ['DescribeIndexFieldsResult'] + ['IndexFields']) + + return [IndexFieldStatus(self, d) for d in data] + + def create_index_field(self, field_name, field_type, + default='', facet=False, returnable=False, + searchable=False, sortable=False, + highlight=False, source_field=None, + analysis_scheme=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. Valid values are + int | double | literal | text | date | latlon | + int-array | double-array | literal-array | text-array | date-array + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``int`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. Does not apply to + fields of type ``int, int-array, text, text-array``. + + :type returnable: bool + :param returnable: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. + + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. + + :type sortable: bool + :param sortable: A boolean to indicate whether sorting + is enabled for this field or not. Does not apply to + fields of array types. + + :type highlight: bool + :param highlight: A boolean to indicate whether highlighting + is enabled for this field or not. 
Does not apply to + fields of type ``double, int, date, latlon`` + + :type source_field: list of strings or string + :param source_field: For array types, this is the list of fields + to treat as the source. For singular types, pass a string only. + + :type analysis_scheme: string + :param analysis_scheme: The analysis scheme to use for this field. + Only applies to ``text | text-array`` field types + + :return: IndexFieldStatus objects + :rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + index = { + 'IndexFieldName': field_name, + 'IndexFieldType': field_type + } + if field_type == 'literal': + index['LiteralOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LiteralOptions']['DefaultValue'] = default + if source_field: + index['LiteralOptions']['SourceField'] = source_field + elif field_type == 'literal-array': + index['LiteralArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['LiteralArrayOptions']['DefaultValue'] = default + if source_field: + index['LiteralArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'int': + index['IntOptions'] = { + 'DefaultValue': default, + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['IntOptions']['DefaultValue'] = default + if source_field: + index['IntOptions']['SourceField'] = source_field + elif field_type == 'int-array': + index['IntArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['IntArrayOptions']['DefaultValue'] = default + if source_field: + index['IntArrayOptions']['SourceFields'] = \ + 
','.join(source_field) + elif field_type == 'date': + index['DateOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DateOptions']['DefaultValue'] = default + if source_field: + index['DateOptions']['SourceField'] = source_field + elif field_type == 'date-array': + index['DateArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DateArrayOptions']['DefaultValue'] = default + if source_field: + index['DateArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'double': + index['DoubleOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DoubleOptions']['DefaultValue'] = default + if source_field: + index['DoubleOptions']['SourceField'] = source_field + elif field_type == 'double-array': + index['DoubleArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DoubleArrayOptions']['DefaultValue'] = default + if source_field: + index['DoubleArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'text': + index['TextOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight, + 'SortEnabled': sortable + } + if default: + index['TextOptions']['DefaultValue'] = default + if source_field: + index['TextOptions']['SourceField'] = source_field + if analysis_scheme: + index['TextOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'text-array': + index['TextArrayOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight + } + if default: + index['TextArrayOptions']['DefaultValue'] = default + if source_field: + index['TextArrayOptions']['SourceFields'] = \ + ','.join(source_field) + if analysis_scheme: + 
index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'latlon': + index['LatLonOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LatLonOptions']['DefaultValue'] = default + if source_field: + index['LatLonOptions']['SourceField'] = source_field + + data = self.layer1.define_index_field(self.name, index) + + data = (data['DefineIndexFieldResponse'] + ['DefineIndexFieldResult'] + ['IndexField']) + + return IndexFieldStatus(self, data, + self.layer1.describe_index_fields) + + def get_expressions(self, names=None): + """ + Return a list of rank expressions defined for this domain. + :return: list of ExpressionStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` + object + """ + fn = self.layer1.describe_expressions + data = fn(self.name, names) + + data = (data['DescribeExpressionsResponse'] + ['DescribeExpressionsResult'] + ['Expressions']) + + return [ExpressionStatus(self, d, fn) for d in data] + + def create_expression(self, name, value): + """ + Create a new expression. + + :type name: string + :param name: The name of an expression for processing + during a search request. + + :type value: string + :param value: The expression to evaluate for ranking + or thresholding while processing a search request. The + Expression syntax is based on JavaScript expressions + and supports: + + * Single value, sort enabled numeric fields (int, double, date) + * Other expressions + * The _score variable, which references a document's relevance + score + * The _time variable, which references the current epoch time + * Integer, floating point, hex, and octal literals + * Arithmetic operators: + - * / % + * Bitwise operators: | & ^ ~ << >> >>> + * Boolean operators (including the ternary operator): && || ! 
?: + * Comparison operators: < <= == >= > + * Mathematical functions: abs ceil exp floor ln log2 log10 logn + max min pow sqrt pow + * Trigonometric functions: acos acosh asin asinh atan atan2 atanh + cos cosh sin sinh tanh tan + * The haversin distance function + + Expressions always return an integer value from 0 to the maximum + 64-bit signed integer value (2^63 - 1). Intermediate results are + calculated as double-precision floating point values and the return + value is rounded to the nearest integer. If the expression is + invalid or evaluates to a negative value, it returns 0. If the + expression evaluates to a value greater than the maximum, it + returns the maximum value. + + The source data for an Expression can be the name of an + IndexField of type int or double, another Expression or the + reserved name _score. The _score source is + defined to return as a double from 0 to 10.0 (inclusive) to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using rank expressions to + customize ranking, see the Amazon CloudSearch Developer + Guide. 
+ + :return: ExpressionStatus object + :rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + data = self.layer1.define_expression(self.name, name, value) + + data = (data['DefineExpressionResponse'] + ['DefineExpressionResult'] + ['Expression']) + + return ExpressionStatus(self, data, + self.layer1.describe_expressions) + + def get_document_service(self): + return DocumentServiceConnection(domain=self) + + def get_search_service(self): + return SearchConnection(domain=self) + + def __repr__(self): + return '' % self.domain_name diff -Nru python-boto-2.20.1/boto/cloudsearch2/exceptions.py python-boto-2.29.1/boto/cloudsearch2/exceptions.py --- python-boto-2.20.1/boto/cloudsearch2/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/exceptions.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,46 @@ +""" +Exceptions that are specific to the cloudsearch2 module. +""" +from boto.exception import BotoServerError + + +class InvalidTypeException(BotoServerError): + """ + Raised when an invalid record type is passed to CloudSearch. + """ + pass + + +class LimitExceededException(BotoServerError): + """ + Raised when a limit has been exceeded. + """ + pass + + +class InternalException(BotoServerError): + """ + A generic server-side error. + """ + pass + + +class DisabledOperationException(BotoServerError): + """ + Raised when an operation has been disabled. + """ + pass + + +class ResourceNotFoundException(BotoServerError): + """ + Raised when a requested resource does not exist. + """ + pass + + +class BaseException(BotoServerError): + """ + A generic server-side error. 
+ """ + pass diff -Nru python-boto-2.20.1/boto/cloudsearch2/__init__.py python-boto-2.29.1/boto/cloudsearch2/__init__.py --- python-boto-2.20.1/boto/cloudsearch2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,42 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + import boto.cloudsearch2.layer1 + return get_regions( + 'cloudsearch', + connection_cls=boto.cloudsearch2.layer1.CloudSearchConnection + ) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.20.1/boto/cloudsearch2/layer1.py python-boto-2.29.1/boto/cloudsearch2/layer1.py --- python-boto-2.20.1/boto/cloudsearch2/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,784 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudsearch2 import exceptions + + +class CloudSearchConnection(AWSQueryConnection): + """ + Amazon CloudSearch Configuration Service + You use the Amazon CloudSearch configuration service to create, + configure, and manage search domains. Configuration service + requests are submitted using the AWS Query protocol. AWS Query + requests are HTTP or HTTPS requests submitted via HTTP GET or POST + with a query parameter named Action. + + The endpoint for configuration service requests is region- + specific: cloudsearch. region .amazonaws.com. For example, + cloudsearch.us-east-1.amazonaws.com. For a current list of + supported regions and endpoints, see `Regions and Endpoints`_. + """ + APIVersion = "2013-01-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidTypeException": exceptions.InvalidTypeException, + "LimitExceededException": exceptions.LimitExceededException, + "InternalException": exceptions.InternalException, + "DisabledOperationException": exceptions.DisabledOperationException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "BaseException": exceptions.BaseException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudSearchConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_suggesters(self, domain_name): + """ + Indexes the search suggestions. 
+ + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='BuildSuggesters', + verb='POST', + path='/', params=params) + + def create_domain(self, domain_name): + """ + Creates a new search domain. For more information, see + `Creating a Search Domain`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A name for the domain you are creating. Allowed + characters are a-z (lower-case letters), 0-9, and hyphen (-). + Domain names must start with a letter or number and be at least 3 + and no more than 28 characters long. + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='CreateDomain', + verb='POST', + path='/', params=params) + + def define_analysis_scheme(self, domain_name, analysis_scheme): + """ + Configures an analysis scheme for a domain. An analysis scheme + defines language-specific text processing options for a `text` + field. For more information, see `Configuring Analysis + Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme: dict + :param analysis_scheme: Configuration information for an analysis + scheme. Each analysis scheme has a unique name and specifies the + language of the text to be processed. 
The following options can be + configured for an analysis scheme: `Synonyms`, `Stopwords`, + `StemmingDictionary`, and `AlgorithmicStemming`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'AnalysisScheme', + analysis_scheme) + return self._make_request( + action='DefineAnalysisScheme', + verb='POST', + path='/', params=params) + + def define_expression(self, domain_name, expression): + """ + Configures an `Expression` for the search domain. Used to + create new expressions and modify existing ones. If the + expression exists, the new configuration replaces the old one. + For more information, see `Configuring Expressions`_ in the + Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression: dict + :param expression: A named expression that can be evaluated at search + time. Can be used for sorting and filtering search results and + constructing other expressions. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Expression', + expression) + return self._make_request( + action='DefineExpression', + verb='POST', + path='/', params=params) + + def define_index_field(self, domain_name, index_field): + """ + Configures an `IndexField` for the search domain. Used to + create new fields and modify existing ones. You must specify + the name of the domain you are configuring and an index field + configuration. The index field configuration specifies a + unique name, the index field type, and the options you want to + configure for the field. The options you can specify depend on + the `IndexFieldType`. If the field exists, the new + configuration replaces the old one. 
For more information, see + `Configuring Index Fields`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field: dict + :param index_field: The index field and field options you want to + configure. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'IndexField', + index_field) + return self._make_request( + action='DefineIndexField', + verb='POST', + path='/', params=params) + + def define_suggester(self, domain_name, suggester): + """ + Configures a suggester for a domain. A suggester enables you + to display possible matches before users finish typing their + queries. When you configure a suggester, you must specify the + name of the text field you want to search for possible matches + and a unique name for the suggester. For more information, see + `Getting Search Suggestions`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester: dict + :param suggester: Configuration information for a search suggester. + Each suggester has a unique name and specifies the text field you + want to use for suggestions. The following options can be + configured for a suggester: `FuzzyMatching`, `SortExpression`. 
+ + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Suggester', + suggester) + return self._make_request( + action='DefineSuggester', + verb='POST', + path='/', params=params) + + def delete_analysis_scheme(self, domain_name, analysis_scheme_name): + """ + Deletes an analysis scheme. For more information, see + `Configuring Analysis Schemes`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme_name: string + :param analysis_scheme_name: The name of the analysis scheme you want + to delete. + + """ + params = { + 'DomainName': domain_name, + 'AnalysisSchemeName': analysis_scheme_name, + } + return self._make_request( + action='DeleteAnalysisScheme', + verb='POST', + path='/', params=params) + + def delete_domain(self, domain_name): + """ + Permanently deletes a search domain and all of its data. Once + a domain has been deleted, it cannot be recovered. For more + information, see `Deleting a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to permanently + delete. + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DeleteDomain', + verb='POST', + path='/', params=params) + + def delete_expression(self, domain_name, expression_name): + """ + Removes an `Expression` from the search domain. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. 
Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression_name: string + :param expression_name: The name of the `Expression` to delete. + + """ + params = { + 'DomainName': domain_name, + 'ExpressionName': expression_name, + } + return self._make_request( + action='DeleteExpression', + verb='POST', + path='/', params=params) + + def delete_index_field(self, domain_name, index_field_name): + """ + Removes an `IndexField` from the search domain. For more + information, see `Configuring Index Fields`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field_name: string + :param index_field_name: The name of the index field your want to + remove from the domain's indexing options. + + """ + params = { + 'DomainName': domain_name, + 'IndexFieldName': index_field_name, + } + return self._make_request( + action='DeleteIndexField', + verb='POST', + path='/', params=params) + + def delete_suggester(self, domain_name, suggester_name): + """ + Deletes a suggester. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester_name: string + :param suggester_name: Specifies the name of the suggester you want to + delete. 
+ + """ + params = { + 'DomainName': domain_name, + 'SuggesterName': suggester_name, + } + return self._make_request( + action='DeleteSuggester', + verb='POST', + path='/', params=params) + + def describe_analysis_schemes(self, domain_name, + analysis_scheme_names=None, deployed=None): + """ + Gets the analysis schemes configured for a domain. An analysis + scheme defines language-specific text processing options for a + `text` field. Can be limited to specific analysis schemes by + name. By default, shows all analysis schemes and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type analysis_scheme_names: list + :param analysis_scheme_names: The analysis schemes you want to + describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if analysis_scheme_names is not None: + self.build_list_params(params, + analysis_scheme_names, + 'AnalysisSchemeNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAnalysisSchemes', + verb='POST', + path='/', params=params) + + def describe_availability_options(self, domain_name, deployed=None): + """ + Gets the availability options configured for a domain. By + default, shows the configuration with any pending changes. Set + the `Deployed` option to `True` to show the active + configuration and exclude pending changes. For more + information, see `Configuring Availability Options`_ in the + Amazon CloudSearch Developer Guide . 
+ + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAvailabilityOptions', + verb='POST', + path='/', params=params) + + def describe_domains(self, domain_names=None): + """ + Gets information about the search domains owned by this + account. Can be limited to specific domains. Shows all domains + by default. For more information, see `Getting Information + about a Search Domain`_ in the Amazon CloudSearch Developer + Guide . + + :type domain_names: list + :param domain_names: The names of the domains you want to include in + the response. + + """ + params = {} + if domain_names is not None: + self.build_list_params(params, + domain_names, + 'DomainNames.member') + return self._make_request( + action='DescribeDomains', + verb='POST', + path='/', params=params) + + def describe_expressions(self, domain_name, expression_names=None, + deployed=None): + """ + Gets the expressions configured for the search domain. Can be + limited to specific expressions by name. By default, shows all + expressions and includes any pending changes to the + configuration. Set the `Deployed` option to `True` to show the + active configuration and exclude pending changes. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type expression_names: list + :param expression_names: Limits the `DescribeExpressions` response to + the specified expressions. If not specified, all expressions are + shown. 
+ + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if expression_names is not None: + self.build_list_params(params, + expression_names, + 'ExpressionNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeExpressions', + verb='POST', + path='/', params=params) + + def describe_index_fields(self, domain_name, field_names=None, + deployed=None): + """ + Gets information about the index fields configured for the + search domain. Can be limited to specific fields by name. By + default, shows all fields and includes any pending changes to + the configuration. Set the `Deployed` option to `True` to show + the active configuration and exclude pending changes. For more + information, see `Getting Domain Information`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type field_names: list + :param field_names: A list of the index fields you want to describe. If + not specified, information is returned for all configured index + fields. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if field_names is not None: + self.build_list_params(params, + field_names, + 'FieldNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeIndexFields', + verb='POST', + path='/', params=params) + + def describe_scaling_parameters(self, domain_name): + """ + Gets the scaling parameters configured for a domain. 
A + domain's scaling parameters specify the desired search + instance type and replication count. For more information, see + `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DescribeScalingParameters', + verb='POST', + path='/', params=params) + + def describe_service_access_policies(self, domain_name, deployed=None): + """ + Gets information about the access policies that control access + to the domain's document and search endpoints. By default, + shows the configuration with any pending changes. Set the + `Deployed` option to `True` to show the active configuration + and exclude pending changes. For more information, see + `Configuring Access for a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def describe_suggesters(self, domain_name, suggester_names=None, + deployed=None): + """ + Gets the suggesters configured for a domain. A suggester + enables you to display possible matches before users finish + typing their queries. Can be limited to specific suggesters by + name. 
By default, shows all suggesters and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type suggester_names: list + :param suggester_names: The suggesters you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if suggester_names is not None: + self.build_list_params(params, + suggester_names, + 'SuggesterNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeSuggesters', + verb='POST', + path='/', params=params) + + def index_documents(self, domain_name): + """ + Tells the search domain to start indexing its documents using + the latest indexing options. This operation must be invoked to + activate options whose OptionStatus is + `RequiresIndexDocuments`. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='IndexDocuments', + verb='POST', + path='/', params=params) + + def list_domain_names(self): + """ + Lists all search domains owned by an account. 
+ + + """ + params = {} + return self._make_request( + action='ListDomainNames', + verb='POST', + path='/', params=params) + + def update_availability_options(self, domain_name, multi_az): + """ + Configures the availability options for a domain. Enabling the + Multi-AZ option expands an Amazon CloudSearch domain to an + additional Availability Zone in the same Region to increase + fault tolerance in the event of a service disruption. Changes + to the Multi-AZ option can take about half an hour to become + active. For more information, see `Configuring Availability + Options`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type multi_az: boolean + :param multi_az: You expand an existing search domain to a second + Availability Zone by setting the Multi-AZ option to true. + Similarly, you can turn off the Multi-AZ option to downgrade the + domain to a single Availability Zone by setting the Multi-AZ option + to `False`. + + """ + params = {'DomainName': domain_name, 'MultiAZ': multi_az, } + return self._make_request( + action='UpdateAvailabilityOptions', + verb='POST', + path='/', params=params) + + def update_scaling_parameters(self, domain_name, scaling_parameters): + """ + Configures scaling parameters for a domain. A domain's scaling + parameters specify the desired search instance type and + replication count. Amazon CloudSearch will still automatically + scale your domain based on the volume of data and traffic, but + not below the desired instance type and replication count. If + the Multi-AZ option is enabled, these values control the + resources used per Availability Zone. 
For more information, + see `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type scaling_parameters: dict + :param scaling_parameters: The desired instance type and desired number + of replicas of each index partition. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'ScalingParameters', + scaling_parameters) + return self._make_request( + action='UpdateScalingParameters', + verb='POST', + path='/', params=params) + + def update_service_access_policies(self, domain_name, access_policies): + """ + Configures the access rules that control access to the + domain's document and search endpoints. For more information, + see ` Configuring Access for an Amazon CloudSearch Domain`_. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type access_policies: string + :param access_policies: The access rules you want to configure. These + rules replace any existing rules. + + """ + params = { + 'DomainName': domain_name, + 'AccessPolicies': access_policies, + } + return self._make_request( + action='UpdateServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def build_complex_param(self, params, label, value): + """Serialize a structure. 
+ + For example:: + + param_type = 'structure' + label = 'IndexField' + value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}} + + would result in the params dict being updated with these params:: + + IndexField.IndexFieldName = a + IndexField.IntOptions.DefaultValue = 5 + + :type params: dict + :param params: The params dict. The complex list params + will be added to this dict. + + :type label: str + :param label: String label for param key + + :type value: any + :param value: The value to serialize + """ + for k, v in value.items(): + if isinstance(v, dict): + for k2, v2 in v.items(): + self.build_complex_param(params, label + '.' + k, v) + elif isinstance(v, bool): + params['%s.%s' % (label, k)] = v and 'true' or 'false' + else: + params['%s.%s' % (label, k)] = v + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff -Nru python-boto-2.20.1/boto/cloudsearch2/layer2.py python-boto-2.29.1/boto/cloudsearch2/layer2.py --- python-boto-2.20.1/boto/cloudsearch2/layer2.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/layer2.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,92 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from .layer1 import CloudSearchConnection +from .domain import Domain + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host=None, debug=0, session_token=None, region=None, + validate_certs=True): + + if type(region) in [str, unicode]: + import boto.cloudsearch2 + for region_info in boto.cloudsearch2.regions(): + if region_info.name == region: + region = region_info + break + + self.layer1 = CloudSearchConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + host=host, + debug=debug, + security_token=session_token, + region=region, + validate_certs=validate_certs) + + def list_domains(self, domain_names=None): + """ + Return a list of objects for each domain defined in the + current account. + :rtype: list of :class:`boto.cloudsearch2.domain.Domain` + """ + domain_data = self.layer1.describe_domains(domain_names) + + domain_data = (domain_data['DescribeDomainsResponse'] + ['DescribeDomainsResult'] + ['DomainStatusList']) + + return [Domain(self.layer1, data) for data in domain_data] + + def create_domain(self, domain_name): + """ + Create a new CloudSearch domain and return the corresponding object. 
+ :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + data = self.layer1.create_domain(domain_name) + return Domain(self.layer1, data['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + def lookup(self, domain_name): + """ + Lookup a single domain + :param domain_name: The name of the domain to look up + :type domain_name: str + + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + domains = self.list_domains(domain_names=[domain_name]) + if len(domains) > 0: + return domains[0] diff -Nru python-boto-2.20.1/boto/cloudsearch2/optionstatus.py python-boto-2.29.1/boto/cloudsearch2/optionstatus.py --- python-boto-2.20.1/boto/cloudsearch2/optionstatus.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/optionstatus.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,233 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json + + +class OptionStatus(dict): + """ + Presents a combination of status field (defined below) which are + accessed as attributes and option values which are stored in the + native Python dictionary. In this class, the option values are + merged from a JSON object that is stored as the Option part of + the object. + + :ivar domain_name: The name of the domain this option is associated with. + :ivar create_date: A timestamp for when this option was created. + :ivar state: The state of processing a change to an option. + Possible values: + + * RequiresIndexDocuments: the option's latest value will not + be visible in searches until IndexDocuments has been called + and indexing is complete. + * Processing: the option's latest value is not yet visible in + all searches but is in the process of being activated. + * Active: the option's latest value is completely visible. + + :ivar update_date: A timestamp for when this option was updated. + :ivar update_version: A unique integer that indicates when this + option was last updated. + """ + + def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None, + save_fn=None): + self.domain = domain + self.refresh_fn = refresh_fn + self.refresh_key = refresh_key + self.save_fn = save_fn + self.refresh(data) + + def _update_status(self, status): + self.creation_date = status['CreationDate'] + self.status = status['State'] + self.update_date = status['UpdateDate'] + self.update_version = int(status['UpdateVersion']) + + def _update_options(self, options): + if options: + self.update(options) + + def refresh(self, data=None): + """ + Refresh the local state of the object. 
You can either pass + new state data in as the parameter ``data`` or, if that parameter + is omitted, the state data will be retrieved from CloudSearch. + """ + if not data: + if self.refresh_fn: + data = self.refresh_fn(self.domain.name) + + if data and self.refresh_key: + # Attempt to pull out the right nested bag of data + for key in self.refresh_key: + data = data[key] + if data: + self._update_status(data['Status']) + self._update_options(data['Options']) + + def to_json(self): + """ + Return the JSON representation of the options as a string. + """ + return json.dumps(self) + + def save(self): + """ + Write the current state of the local object back to the + CloudSearch service. + """ + if self.save_fn: + data = self.save_fn(self.domain.name, self.to_json()) + self.refresh(data) + + +class IndexFieldStatus(OptionStatus): + def save(self): + pass + + +class AvailabilityOptionsStatus(OptionStatus): + def save(self): + pass + + +class ScalingParametersStatus(IndexFieldStatus): + pass + + +class ExpressionStatus(IndexFieldStatus): + pass + + +class ServicePoliciesStatus(OptionStatus): + + def new_statement(self, arn, ip): + """ + Returns a new policy statement that will allow + access to the service described by ``arn`` by the + ip specified in ``ip``. + + :type arn: string + :param arn: The Amazon Resource Notation identifier for the + service you wish to provide access to. This would be + either the search service or the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. 
+ """ + return { + "Effect": "Allow", + "Action": "*", # Docs say use GET, but denies unless * + "Resource": arn, + "Condition": { + "IpAddress": { + "aws:SourceIp": [ip] + } + } + } + + def _allow_ip(self, arn, ip): + if 'Statement' not in self: + s = self.new_statement(arn, ip) + self['Statement'] = [s] + self.save() + else: + add_statement = True + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + add_statement = False + condition = statement['Condition'][condition_name] + if ip not in condition['aws:SourceIp']: + condition['aws:SourceIp'].append(ip) + + if add_statement: + s = self.new_statement(arn, ip) + self['Statement'].append(s) + self.save() + + def allow_search_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def allow_doc_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def _disallow_ip(self, arn, ip): + if 'Statement' not in self: + return + need_update = False + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + condition = statement['Condition'][condition_name] + if ip in condition['aws:SourceIp']: + condition['aws:SourceIp'].remove(ip) + need_update = True + if need_update: + self.save() + + def disallow_search_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the search service. 
+ + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) + + def disallow_doc_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) diff -Nru python-boto-2.20.1/boto/cloudsearch2/search.py python-boto-2.29.1/boto/cloudsearch2/search.py --- python-boto-2.20.1/boto/cloudsearch2/search.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudsearch2/search.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,368 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import json +from math import ceil +import boto +from boto.compat import json +import requests + +SIMPLE = 'simple' +STRUCTURED = 'structured' +LUCENE = 'lucene' +DISMAX = 'dismax' + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + pass + + +class SearchResults(object): + def __init__(self, **attrs): + self.rid = attrs['status']['rid'] + self.time_ms = attrs['status']['time-ms'] + self.hits = attrs['hits']['found'] + self.docs = attrs['hits']['hit'] + self.start = attrs['hits']['start'] + self.query = attrs['query'] + self.search_service = attrs['search_service'] + + self.facets = {} + if 'facets' in attrs: + for (facet, values) in attrs['facets'].iteritems(): + if 'buckets' in values: + self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values.get('buckets', []))) + + self.num_pages_needed = ceil(self.hits / self.query.real_size) + + def __len__(self): + return len(self.docs) + + def __iter__(self): + return iter(self.docs) + + def next_page(self): + """Call Cloudsearch to get the next page of search results + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: the following page of search results + """ + if self.query.page <= self.num_pages_needed: + self.query.start += self.query.real_size + self.query.page += 1 + return self.search_service(self.query) + else: + raise StopIteration + + +class Query(object): + + RESULTS_PER_PAGE = 500 + + def __init__(self, q=None, parser=None, fq=None, expr=None, + return_fields=None, size=10, start=0, sort=None, + facet=None, highlight=None, partial=None, options=None): + + self.q = q + self.parser = parser + self.fq = fq + self.expr = expr or {} + self.sort = sort or [] + self.return_fields = return_fields or [] + self.start = start + self.facet = facet or {} + self.highlight = highlight or {} + self.partial = partial + self.options = options + self.page = 0 + self.update_size(size) + + def update_size(self, new_size): + 
self.size = new_size + self.real_size = Query.RESULTS_PER_PAGE if (self.size > + Query.RESULTS_PER_PAGE or self.size == 0) else self.size + + def to_params(self): + """Transform search parameters from instance properties to a dictionary + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.parser: + params['q.parser'] = self.parser + + if self.fq: + params['fq'] = self.fq + + if self.expr: + for k, v in self.expr.iteritems(): + params['expr.%s' % k] = v + + if self.facet: + for k, v in self.facet.iteritems(): + if type(v) not in [str, unicode]: + v = json.dumps(v) + params['facet.%s' % k] = v + + if self.highlight: + for k, v in self.highlight.iteritems(): + params['highlight.%s' % k] = v + + if self.options: + params['options'] = self.options + + if self.return_fields: + params['return'] = ','.join(self.return_fields) + + if self.partial is not None: + params['partial'] = self.partial + + if self.sort: + params['sort'] = ','.join(self.sort) + + return params + + +class SearchConnection(object): + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + self.session = requests.Session() + + if not endpoint: + self.endpoint = domain.search_service_endpoint + + def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, + partial=None, options=None): + return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields, + size=size, start=start, facet=facet, highlight=highlight, + sort=sort, partial=partial, options=options) + + def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, partial=None, + options=None): + """ + Send a query to CloudSearch + + Each search query should use at least the q or bq argument to specify + the search parameter. 
The other options are used to specify the + criteria of the search. + + :type q: string + :param q: A string to search the default search fields for. + + :type parser: string + :param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax' + + :type fq: string + :param fq: The filter query to use. + + :type sort: List of strings + :param sort: A list of fields or rank expressions used to order the + search results. Order is handled by adding 'desc' or 'asc' after the field name. + ``['year desc', 'author asc']`` + + :type return_fields: List of strings + :param return_fields: A list of fields which should be returned by the + search. If this field is not specified, only IDs will be returned. + ``['headline']`` + + :type size: int + :param size: Number of search results to specify + + :type start: int + :param start: Offset of the first search result to return (can be used + for paging) + + :type facet: dict + :param facet: Dictionary of fields for which facets should be returned + The facet value is string of JSON options + ``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}`` + + :type highlight: dict + :param highlight: Dictionary of fields for which highlights should be returned + The facet value is string of JSON options + ``{'genres': '{format:'text',max_phrases:2,pre_tag:'',post_tag:''}'}`` + + :type partial: bool + :param partial: Should partial results from a partioned service be returned if + one or more index partitions are unreachable. + + :type options: str + :param options: Options for the query parser specified in *parser*. + Specified as a string in JSON format. 
+ ``{fields: ['title^5', 'description']}`` + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: Returns the results of this search + + The following examples all assume we have indexed a set of documents + with fields: *author*, *date*, *headline* + + A simple search will look for documents whose default text search + fields will contain the search word exactly: + + >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy) + + A simple search with more keywords will return documents whose default + text search fields contain the search strings together or separately. + + >>> search(q='Tim apple') # Will match "tim" and "apple" + + More complex searches require the boolean search operator. + + Wildcard searches can be used to search for any words that start with + the search string. + + >>> search(q="'Tim*'") # Return documents with words like Tim or Timothy) + + Search terms can also be combined. Allowed operators are "and", "or", + "not", "field", "optional", "token", "phrase", or "filter" + + >>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured') + + Facets allow you to show classification information about the search + results. 
For example, you can retrieve the authors who have written + about Tim with a max of 3 + + >>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'}) + """ + + query = self.build_query(q=q, parser=parser, fq=fq, rank=rank, + return_fields=return_fields, + size=size, start=start, facet=facet, + highlight=highlight, sort=sort, + partial=partial, options=options) + return self(query) + + def __call__(self, query): + """Make a call to CloudSearch + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: search results + """ + api_version = '2013-01-01' + if self.domain: + api_version = self.domain.layer1.APIVersion + url = "http://%s/%s/search" % (self.endpoint, api_version) + params = query.to_params() + + r = self.session.get(url, params=params) + try: + data = json.loads(r.content) + except ValueError, e: + if r.status_code == 403: + msg = '' + import re + g = re.search('

403 Forbidden

([^<]+)<', r.content) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % r.content, query) + + if 'messages' in data and 'error' in data: + for m in data['messages']: + if m['severity'] == 'fatal': + raise SearchServiceException("Error processing search %s " + "=> %s" % (params, m['message']), query) + elif 'error' in data: + raise SearchServiceException("Unknown error processing search %s" + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self + + return SearchResults(**data) + + def get_all_paged(self, query, per_page): + """Get a generator to iterate over all pages of search results + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :type per_page: int + :param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object. + + :rtype: generator + :return: Generator containing :class:`boto.cloudsearch2.search.SearchResults` + """ + query.update_size(per_page) + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + yield results + query.start += query.real_size + page += 1 + + def get_all_hits(self, query): + """Get a generator to iterate over all search results + + Transparently handles the results paging from Cloudsearch + search results so even if you have many thousands of results + you can iterate over all results in a reasonably efficient + manner. 
+ + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: generator + :return: All docs matching query + """ + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + for doc in results: + yield doc + query.start += query.real_size + page += 1 + + def get_num_hits(self, query): + """Return the total number of hits for query + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: a group of search criteria + + :rtype: int + :return: Total number of hits for query + """ + query.update_size(1) + return self(query).hits diff -Nru python-boto-2.20.1/boto/cloudtrail/__init__.py python-boto-2.29.1/boto/cloudtrail/__init__.py --- python-boto-2.20.1/boto/cloudtrail/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudtrail/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,14 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.cloudtrail.layer1 import CloudTrailConnection - - return [RegionInfo(name='us-east-1', - endpoint='cloudtrail.us-east-1.amazonaws.com', - connection_cls=CloudTrailConnection), - RegionInfo(name='us-west-2', - endpoint='cloudtrail.us-west-2.amazonaws.com', - connection_cls=CloudTrailConnection), - ] + return get_regions('cloudtrail', connection_cls=CloudTrailConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/cloudtrail/layer1.py python-boto-2.29.1/boto/cloudtrail/layer1.py --- python-boto-2.20.1/boto/cloudtrail/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/cloudtrail/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -42,8 +42,8 @@ CloudTrail is a web service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start - time of the event, the source IP address, the request parameters, - and the response elements returned by the service. + time of the AWS API call, the source IP address, the request + parameters, and the response elements returned by the service. As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various @@ -52,11 +52,11 @@ programmatic access to AWSCloudTrail. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS - SDKs, including how to download and install them, see the Tools - for Amazon Web Services page. + SDKs, including how to download and install them, see the `Tools + for Amazon Web Services page`_. 
See the CloudTrail User Guide for information about the data that - is included with each event listed in the log files. + is included with each AWS API call listed in the log files. """ APIVersion = "2013-11-01" DefaultRegionName = "us-east-1" @@ -71,10 +71,9 @@ "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException, "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException, "InvalidTrailNameException": exceptions.InvalidTrailNameException, - "InternalErrorException": exceptions.InternalErrorException, + "TrailNotProvidedException": exceptions.TrailNotProvidedException, "TrailNotFoundException": exceptions.TrailNotFoundException, "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException, - "TrailNotProvidedException": exceptions.TrailNotProvidedException, "InvalidS3PrefixException": exceptions.InvalidS3PrefixException, "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException, "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException, @@ -90,75 +89,71 @@ if 'host' not in kwargs: kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(CloudTrailConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] - def create_trail(self, trail=None): + def create_trail(self, name=None, s3_bucket_name=None, + s3_key_prefix=None, sns_topic_name=None, + include_global_service_events=None, trail=None): """ - From the command line, use create-subscription. + From the command line, use `create-subscription`. Creates a trail that specifies the settings for delivery of - log data to an Amazon S3 bucket. The request includes a Trail - structure that specifies the following: - - - + Trail name. - + The name of the Amazon S3 bucket to which CloudTrail - delivers your log files. - + The name of the Amazon S3 key prefix that precedes each log - file. 
- + The name of the Amazon SNS topic that notifies you that a - new file is available in your bucket. - + Whether the log file should include events from global - services. Currently, the only events included in CloudTrail - log files are from IAM and AWS STS. - - - Returns the appropriate HTTP status code if successful. If - not, it returns either one of the CommonErrors or a - FrontEndException with one of the following error codes: - - **MaximumNumberOfTrailsExceeded** - - An attempt was made to create more trails than allowed. You - can only create one trail for each account in each region. - - **TrailAlreadyExists** + log data to an Amazon S3 bucket. - An attempt was made to create a trail with a name that already - exists. + Support for passing Trail as a parameter ends as early as + February 25, 2014. The request and response examples in this + topic show the use of parameters as well as a Trail object. + Until Trail is removed, you can use either Trail or the + parameter list. - **S3BucketDoesNotExist** - - Specified Amazon S3 bucket does not exist. - - **InsufficientS3BucketPolicy** - - Policy on Amazon S3 bucket does not permit CloudTrail to write - to your bucket. See the AWS CloudTrail User Guide for the - required bucket policy. - - **InsufficientSnsTopicPolicy** + :type name: string + :param name: Specifies the name of the trail. - The policy on Amazon SNS topic does not permit CloudTrail to - write to it. Can also occur when an Amazon SNS topic does not - exist. + :type s3_bucket_name: string + :param s3_bucket_name: Specifies the name of the Amazon S3 bucket + designated for publishing log files. + + :type s3_key_prefix: string + :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes + the name of the bucket you have designated for log file delivery. + + :type sns_topic_name: string + :param sns_topic_name: Specifies the name of the Amazon SNS topic + defined for notification of log file delivery. 
+ + :type include_global_service_events: boolean + :param include_global_service_events: Specifies whether the trail is + publishing events from global services such as IAM to the log + files. :type trail: dict - :param trail: Contains the Trail structure that specifies the settings - for each trail. + :param trail: Support for passing a Trail object in the CreateTrail or + UpdateTrail actions will end as early as February 15, 2014. Instead + of the Trail object and its members, use the parameters listed for + these actions. """ params = {} + if name is not None: + params['Name'] = name + if s3_bucket_name is not None: + params['S3BucketName'] = s3_bucket_name + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + if sns_topic_name is not None: + params['SnsTopicName'] = sns_topic_name + if include_global_service_events is not None: + params['IncludeGlobalServiceEvents'] = include_global_service_events if trail is not None: params['trail'] = trail return self.make_request(action='CreateTrail', body=json.dumps(params)) - def delete_trail(self, name=None): + def delete_trail(self, name): """ Deletes a trail. @@ -166,19 +161,17 @@ :param name: The name of a trail to be deleted. """ - params = {} - if name is not None: - params['Name'] = name + params = {'Name': name, } return self.make_request(action='DeleteTrail', body=json.dumps(params)) def describe_trails(self, trail_name_list=None): """ Retrieves the settings for some or all trails associated with - an account. Returns a list of Trail structures in JSON format. + an account. :type trail_name_list: list - :param trail_name_list: The list of Trail object names. + :param trail_name_list: The list of trails. 
""" params = {} @@ -187,97 +180,153 @@ return self.make_request(action='DescribeTrails', body=json.dumps(params)) - def get_trail_status(self, name=None): + def get_trail_status(self, name): """ - Returns GetTrailStatusResult, which contains a JSON-formatted - list of information about the trail specified in the request. - JSON fields include information such as delivery errors, - Amazon SNS and Amazon S3 errors, and times that logging - started and stopped for each trail. + Returns a JSON-formatted list of information about the + specified trail. Fields include information on delivery + errors, Amazon SNS and Amazon S3 errors, and start and stop + logging times for each trail. + + The CloudTrail API is currently undergoing revision. This + action currently returns both new fields and fields slated for + removal from the API. The following lists indicate the plans + for each field: + + **List of Members Planned for Ongoing Support** + + + + IsLogging + + LatestDeliveryTime + + LatestNotificationTime + + StartLoggingTime + + StopLoggingTime + + LatestNotificationError + + LatestDeliveryError + + + **List of Members Scheduled for Removal** + + + + **LatestDeliveryAttemptTime**: Use LatestDeliveryTime + instead. + + **LatestNotificationAttemptTime**: Use + LatestNotificationTime instead. + + **LatestDeliveryAttemptSucceeded**: No replacement. See the + note following this list. + + **LatestNotificationAttemptSucceeded**: No replacement. See + the note following this list. + + **TimeLoggingStarted**: Use StartLoggingTime instead. + + **TimeLoggingStopped**: Use StopLoggingtime instead. + + + No replacements have been created for + LatestDeliveryAttemptSucceeded and + LatestNotificationAttemptSucceeded . Use LatestDeliveryError + and LatestNotificationError to evaluate success or failure of + log delivery or notification. Empty values returned for these + fields indicate success. 
An error in LatestDeliveryError + generally indicates either a missing bucket or insufficient + permissions to write to the bucket. Similarly, an error in + LatestNotificationError indicates either a missing topic or + insufficient permissions. :type name: string :param name: The name of the trail for which you are requesting the current status. """ - params = {} - if name is not None: - params['Name'] = name + params = {'Name': name, } return self.make_request(action='GetTrailStatus', body=json.dumps(params)) - def start_logging(self, name=None): + def start_logging(self, name): """ - Starts the processing of recording user activity events and - log file delivery for a trail. + Starts the recording of AWS API calls and log file delivery + for a trail. :type name: string - :param name: The name of the Trail for which CloudTrail logs events. + :param name: The name of the trail for which CloudTrail logs AWS API + calls. """ - params = {} - if name is not None: - params['Name'] = name + params = {'Name': name, } return self.make_request(action='StartLogging', body=json.dumps(params)) - def stop_logging(self, name=None): + def stop_logging(self, name): """ - Suspends the recording of user activity events and log file - delivery for the specified trail. Under most circumstances, - there is no need to use this action. You can update a trail - without stopping it first. This action is the only way to stop - logging activity. + Suspends the recording of AWS API calls and log file delivery + for the specified trail. Under most circumstances, there is no + need to use this action. You can update a trail without + stopping it first. This action is the only way to stop + recording. :type name: string - :param name: Communicates to CloudTrail the name of the Trail for which - to stop logging events. + :param name: Communicates to CloudTrail the name of the trail for which + to stop logging AWS API calls. 
""" - params = {} - if name is not None: - params['Name'] = name + params = {'Name': name, } return self.make_request(action='StopLogging', body=json.dumps(params)) - def update_trail(self, trail=None): + def update_trail(self, name=None, s3_bucket_name=None, + s3_key_prefix=None, sns_topic_name=None, + include_global_service_events=None, trail=None): """ - From the command line, use update-subscription. + From the command line, use `update-subscription`. Updates the settings that specify delivery of log files. Changes to a trail do not require stopping the CloudTrail - service. You can use this action to designate an existing - bucket for log delivery, or to create a new bucket and prefix. - If the existing bucket has previously been a target for - CloudTrail log files, an IAM policy exists for the bucket. If - you create a new bucket using UpdateTrail, you need to apply - the policy to the bucket using one of the means provided by - the Amazon S3 service. - - The request includes a Trail structure that specifies the - following: - - - + Trail name. - + The name of the Amazon S3 bucket to which CloudTrail - delivers your log files. - + The name of the Amazon S3 key prefix that precedes each log - file. - + The name of the Amazon SNS topic that notifies you that a - new file is available in your bucket. - + Whether the log file should include events from global - services, such as IAM or AWS STS. - - **CreateTrail** returns the appropriate HTTP status code if - successful. If not, it returns either one of the common errors - or one of the exceptions listed at the end of this page. + service. Use this action to designate an existing bucket for + log delivery. If the existing bucket has previously been a + target for CloudTrail log files, an IAM policy exists for the + bucket. + + Support for passing Trail as a parameter ends as early as + February 25, 2014. The request and response examples in this + topic show the use of parameters as well as a Trail object. 
+ Until Trail is removed, you can use either Trail or the + parameter list. + + :type name: string + :param name: Specifies the name of the trail. + + :type s3_bucket_name: string + :param s3_bucket_name: Specifies the name of the Amazon S3 bucket + designated for publishing log files. + + :type s3_key_prefix: string + :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes + the name of the bucket you have designated for log file delivery. + + :type sns_topic_name: string + :param sns_topic_name: Specifies the name of the Amazon SNS topic + defined for notification of log file delivery. + + :type include_global_service_events: boolean + :param include_global_service_events: Specifies whether the trail is + publishing events from global services such as IAM to the log + files. :type trail: dict - :param trail: Represents the Trail structure that contains the - CloudTrail setting for an account. + :param trail: Support for passing a Trail object in the CreateTrail or + UpdateTrail actions will end as early as February 15, 2014. Instead + of the Trail object and its members, use the parameters listed for + these actions. """ params = {} + if name is not None: + params['Name'] = name + if s3_bucket_name is not None: + params['S3BucketName'] = s3_bucket_name + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + if sns_topic_name is not None: + params['SnsTopicName'] = sns_topic_name + if include_global_service_events is not None: + params['IncludeGlobalServiceEvents'] = include_global_service_events if trail is not None: params['trail'] = trail return self.make_request(action='UpdateTrail', diff -Nru python-boto-2.20.1/boto/compat.py python-boto-2.29.1/boto/compat.py --- python-boto-2.20.1/boto/compat.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/compat.py 2014-05-30 20:49:34.000000000 +0000 @@ -19,6 +19,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# +import os + # This allows boto modules to say "from boto.compat import json". This is # preferred so that all modules don't have to repeat this idiom. @@ -26,3 +28,14 @@ import simplejson as json except ImportError: import json + + +# If running in Google App Engine there is no "user" and +# os.path.expanduser() will fail. Attempt to detect this case and use a +# no-op expanduser function in this case. +try: + os.path.expanduser('~') + expanduser = os.path.expanduser +except (AttributeError, ImportError): + # This is probably running on App Engine. + expanduser = (lambda x: x) diff -Nru python-boto-2.20.1/boto/connection.py python-boto-2.29.1/boto/connection.py --- python-boto-2.20.1/boto/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -45,6 +45,7 @@ from __future__ import with_statement import base64 +from datetime import datetime import errno import httplib import os @@ -373,7 +374,7 @@ val = self.headers[key] if isinstance(val, unicode): safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~' - self.headers[key] = urllib.quote_plus(val.encode('utf-8'), safe) + self.headers[key] = urllib.quote(val.encode('utf-8'), safe) connection._auth_handler.add_auth(self, **kwargs) @@ -423,7 +424,7 @@ https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, - validate_certs=True): + validate_certs=True, profile_name=None): """ :type host: str :param host: The host to make the connection to @@ -434,6 +435,10 @@ :keyword str aws_secret_access_key: Your AWS Secret Access Key (provided by Amazon). If none is specified, the value in your ``AWS_SECRET_ACCESS_KEY`` environmental variable is used. + :keyword str security_token: The security token associated with + temporary credentials issued by STS. Optional unless using + temporary credentials. If none is specified, the environment + variable ``AWS_SECURITY_TOKEN`` is used if defined. 
:type is_secure: boolean :param is_secure: Whether the connection is over SSL @@ -464,6 +469,10 @@ :type validate_certs: bool :param validate_certs: Controls whether SSL certificates will be validated or not. Defaults to True. + + :type profile_name: str + :param profile_name: Override usual Credentials section in config + file to use a named set of keys instead. """ self.suppress_consec_slashes = suppress_consec_slashes self.num_retries = 6 @@ -485,8 +494,11 @@ "support this feature are not available. Certificate " "validation is only supported when running under Python " "2.6 or later.") - self.ca_certificates_file = config.get_value( + certs_file = config.get_value( 'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE) + if certs_file == 'system': + certs_file = None + self.ca_certificates_file = certs_file if port: self.port = port else: @@ -542,7 +554,8 @@ self.provider = Provider(self._provider_type, aws_access_key_id, aws_secret_access_key, - security_token) + security_token, + profile_name) # Allow config file to override default host, port, and host header. 
if self.provider.host: @@ -559,6 +572,7 @@ host, config, self.provider, self._required_auth_capability()) if getattr(self, 'AuthServiceName', None) is not None: self.auth_service_name = self.AuthServiceName + self.request_hook = None def __repr__(self): return '%s:%s' % (self.__class__.__name__, self.host) @@ -599,6 +613,10 @@ gs_secret_access_key = aws_secret_access_key secret_key = aws_secret_access_key + def profile_name(self): + return self.provider.profile_name + profile_name = property(profile_name) + def get_path(self, path='/'): # The default behavior is to suppress consecutive slashes for reasons # discussed at @@ -680,7 +698,7 @@ self.proxy_port = self.port self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') - self.use_proxy = (self.proxy != None) + self.use_proxy = (self.proxy is not None) def get_http_connection(self, host, port, is_secure): conn = self._pool.get_http_connection(host, port, is_secure) @@ -806,9 +824,12 @@ h = httplib.HTTPConnection(host) if self.https_validate_certificates and HAVE_HTTPS_CONNECTION: - boto.log.debug("wrapping ssl socket for proxied connection; " - "CA certificate file=%s", - self.ca_certificates_file) + msg = "wrapping ssl socket for proxied connection; " + if self.ca_certificates_file: + msg += "CA certificate file=%s" %self.ca_certificates_file + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) key_file = self.http_connection_kwargs.get('key_file', None) cert_file = self.http_connection_kwargs.get('cert_file', None) sslSock = ssl.wrap_socket(sock, keyfile=key_file, @@ -847,6 +868,9 @@ except AttributeError: request.headers['Host'] = self.host.split(':', 1)[0] + def set_request_hook(self, hook): + self.request_hook = hook + def _mexe(self, request, sender=None, override_num_retries=None, retry_handler=None): """ @@ -887,8 +911,9 @@ # the port info. All others should be now be up to date and # not include the port. 
if 's3' not in self._required_auth_capability(): - self.set_host_header(request) - + if not getattr(self, 'anon', False): + self.set_host_header(request) + request.start_time = datetime.now() if callable(sender): response = sender(connection, request.method, request.path, request.body, request.headers) @@ -929,6 +954,8 @@ else: self.put_http_connection(request.host, request.port, self.is_secure, connection) + if self.request_hook is not None: + self.request_hook.handle_request_data(request, response) return response else: scheme, request.host, request.path, \ @@ -969,6 +996,8 @@ # and stil haven't succeeded. So, if we have a response object, # use it to raise an exception. # Otherwise, raise the exception that must have already happened. + if self.request_hook is not None: + self.request_hook.handle_request_data(request, response, error=True) if response: raise BotoServerError(response.status, response.reason, body) elif e: @@ -982,11 +1011,11 @@ path = self.get_path(path) if auth_path is not None: auth_path = self.get_path(auth_path) - if params == None: + if params is None: params = {} else: params = params.copy() - if headers == None: + if headers is None: headers = {} else: headers = headers.copy() @@ -1033,14 +1062,15 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=None, debug=0, https_connection_factory=None, path='/', security_token=None, - validate_certs=True): - AWSAuthConnection.__init__(self, host, aws_access_key_id, + validate_certs=True, profile_name=None): + super(AWSQueryConnection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return [] diff -Nru python-boto-2.20.1/boto/contrib/ymlmessage.py 
python-boto-2.29.1/boto/contrib/ymlmessage.py --- python-boto-2.20.1/boto/contrib/ymlmessage.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/contrib/ymlmessage.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -43,7 +43,7 @@ def __init__(self, queue=None, body='', xml_attrs=None): self.data = None - Message.__init__(self, queue, body) + super(YAMLMessage, self).__init__(queue, body) def set_body(self, body): self.data = yaml.load(body) diff -Nru python-boto-2.20.1/boto/core/auth.py python-boto-2.29.1/boto/core/auth.py --- python-boto-2.20.1/boto/core/auth.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/core/auth.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -import requests.packages.urllib3 -import hmac -import base64 -from hashlib import sha256 -import sys -import datetime - -try: - from urllib.parse import quote -except ImportError: - from urllib import quote - - -class SigV2Auth(object): - """ - Sign an Query Signature V2 request. - """ - def __init__(self, credentials, api_version=''): - self.credentials = credentials - self.api_version = api_version - self.hmac = hmac.new(self.credentials.secret_key.encode('utf-8'), - digestmod=sha256) - - def calc_signature(self, args): - scheme, host, port = requests.packages.urllib3.get_host(args['url']) - string_to_sign = '%s\n%s\n%s\n' % (args['method'], host, '/') - hmac = self.hmac.copy() - args['params']['SignatureMethod'] = 'HmacSHA256' - if self.credentials.token: - args['params']['SecurityToken'] = self.credentials.token - sorted_params = sorted(args['params']) - pairs = [] - for key in sorted_params: - value = args['params'][key] - pairs.append(quote(key, safe='') + '=' + - quote(value, safe='-_~')) - qs = '&'.join(pairs) - string_to_sign += qs - print('string_to_sign') - print(string_to_sign) - hmac.update(string_to_sign.encode('utf-8')) - b64 = base64.b64encode(hmac.digest()).strip().decode('utf-8') - return (qs, b64) - - def add_auth(self, args): - args['params']['Action'] = 'DescribeInstances' - args['params']['AWSAccessKeyId'] = self.credentials.access_key - args['params']['SignatureVersion'] = '2' - args['params']['Timestamp'] = datetime.datetime.utcnow().isoformat() - args['params']['Version'] = 
self.api_version - qs, signature = self.calc_signature(args) - args['params']['Signature'] = signature - if args['method'] == 'POST': - args['data'] = args['params'] - args['params'] = {} diff -Nru python-boto-2.20.1/boto/core/credentials.py python-boto-2.29.1/boto/core/credentials.py --- python-boto-2.20.1/boto/core/credentials.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/core/credentials.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,154 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -import os -from six.moves import configparser -from boto.compat import json -import requests - - -class Credentials(object): - """ - Holds the credentials needed to authenticate requests. 
In addition - the Credential object knows how to search for credentials and how - to choose the right credentials when multiple credentials are found. - """ - - def __init__(self, access_key=None, secret_key=None, token=None): - self.access_key = access_key - self.secret_key = secret_key - self.token = token - - -def _search_md(url='http://169.254.169.254/latest/meta-data/iam/'): - d = {} - try: - r = requests.get(url, timeout=.1) - if r.content: - fields = r.content.split('\n') - for field in fields: - if field.endswith('/'): - d[field[0:-1]] = get_iam_role(url + field) - else: - val = requests.get(url + field).content - if val[0] == '{': - val = json.loads(val) - else: - p = val.find('\n') - if p > 0: - val = r.content.split('\n') - d[field] = val - except (requests.Timeout, requests.ConnectionError): - pass - return d - - -def search_metadata(**kwargs): - credentials = None - metadata = _search_md() - # Assuming there's only one role on the instance profile. - if metadata: - metadata = metadata['iam']['security-credentials'].values()[0] - credentials = Credentials(metadata['AccessKeyId'], - metadata['SecretAccessKey'], - metadata['Token']) - return credentials - - -def search_environment(**kwargs): - """ - Search for credentials in explicit environment variables. - """ - credentials = None - access_key = os.environ.get(kwargs['access_key_name'].upper(), None) - secret_key = os.environ.get(kwargs['secret_key_name'].upper(), None) - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - - -def search_file(**kwargs): - """ - If the 'AWS_CREDENTIAL_FILE' environment variable exists, parse that - file for credentials. 
- """ - credentials = None - if 'AWS_CREDENTIAL_FILE' in os.environ: - persona = kwargs.get('persona', 'default') - access_key_name = kwargs['access_key_name'] - secret_key_name = kwargs['secret_key_name'] - access_key = secret_key = None - path = os.getenv('AWS_CREDENTIAL_FILE') - path = os.path.expandvars(path) - path = os.path.expanduser(path) - cp = configparser.RawConfigParser() - cp.read(path) - if not cp.has_section(persona): - raise ValueError('Persona: %s not found' % persona) - if cp.has_option(persona, access_key_name): - access_key = cp.get(persona, access_key_name) - else: - access_key = None - if cp.has_option(persona, secret_key_name): - secret_key = cp.get(persona, secret_key_name) - else: - secret_key = None - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - - -def search_boto_config(**kwargs): - """ - Look for credentials in boto config file. - """ - credentials = access_key = secret_key = None - if 'BOTO_CONFIG' in os.environ: - paths = [os.environ['BOTO_CONFIG']] - else: - paths = ['/etc/boto.cfg', '~/.boto'] - paths = [os.path.expandvars(p) for p in paths] - paths = [os.path.expanduser(p) for p in paths] - cp = configparser.RawConfigParser() - cp.read(paths) - if cp.has_section('Credentials'): - access_key = cp.get('Credentials', 'aws_access_key_id') - secret_key = cp.get('Credentials', 'aws_secret_access_key') - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - -AllCredentialFunctions = [search_environment, - search_file, - search_boto_config, - search_metadata] - - -def get_credentials(persona='default'): - for cred_fn in AllCredentialFunctions: - credentials = cred_fn(persona=persona, - access_key_name='access_key', - secret_key_name='secret_key') - if credentials: - break - return credentials diff -Nru python-boto-2.20.1/boto/core/dictresponse.py python-boto-2.29.1/boto/core/dictresponse.py --- 
python-boto-2.20.1/boto/core/dictresponse.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/core/dictresponse.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,178 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# - -import xml.sax - - -def pythonize_name(name, sep='_'): - s = '' - if name[0].isupper: - s = name[0].lower() - for c in name[1:]: - if c.isupper(): - s += sep + c.lower() - else: - s += c - return s - - -class XmlHandler(xml.sax.ContentHandler): - - def __init__(self, root_node, connection): - self.connection = connection - self.nodes = [('root', root_node)] - self.current_text = '' - - def startElement(self, name, attrs): - self.current_text = '' - t = self.nodes[-1][1].startElement(name, attrs, self.connection) - if t != None: - if isinstance(t, tuple): - self.nodes.append(t) - else: - self.nodes.append((name, t)) - - def endElement(self, name): - self.nodes[-1][1].endElement(name, self.current_text, self.connection) - if self.nodes[-1][0] == name: - self.nodes.pop() - self.current_text = '' - - def characters(self, content): - self.current_text += content - - def parse(self, s): - xml.sax.parseString(s, self) - - -class Element(dict): - - def __init__(self, connection=None, element_name=None, - stack=None, parent=None, list_marker=None, - item_marker=None, pythonize_name=False): - dict.__init__(self) - self.connection = connection - self.element_name = element_name - self.list_marker = list_marker or ['Set'] - self.item_marker = item_marker or ['member', 'item'] - if stack is None: - self.stack = [] - else: - self.stack = stack - self.pythonize_name = pythonize_name - self.parent = parent - - def __getattr__(self, key): - if key in self: - return self[key] - for k in self: - e = self[k] - if isinstance(e, Element): - try: - return getattr(e, key) - except AttributeError: - pass - raise AttributeError - - def get_name(self, name): - if self.pythonize_name: - name = pythonize_name(name) - return name - - def startElement(self, name, attrs, connection): - self.stack.append(name) - for lm in self.list_marker: - if name.endswith(lm): - l = ListElement(self.connection, name, self.list_marker, - self.item_marker, self.pythonize_name) - self[self.get_name(name)] = 
l - return l - if len(self.stack) > 0: - element_name = self.stack[-1] - e = Element(self.connection, element_name, self.stack, self, - self.list_marker, self.item_marker, - self.pythonize_name) - self[self.get_name(element_name)] = e - return (element_name, e) - else: - return None - - def endElement(self, name, value, connection): - if len(self.stack) > 0: - self.stack.pop() - value = value.strip() - if value: - if isinstance(self.parent, Element): - self.parent[self.get_name(name)] = value - elif isinstance(self.parent, ListElement): - self.parent.append(value) - - -class ListElement(list): - - def __init__(self, connection=None, element_name=None, - list_marker=['Set'], item_marker=('member', 'item'), - pythonize_name=False): - list.__init__(self) - self.connection = connection - self.element_name = element_name - self.list_marker = list_marker - self.item_marker = item_marker - self.pythonize_name = pythonize_name - - def get_name(self, name): - if self.pythonize_name: - name = utils.pythonize_name(name) - return name - - def startElement(self, name, attrs, connection): - for lm in self.list_marker: - if name.endswith(lm): - l = ListElement(self.connection, name, - self.list_marker, self.item_marker, - self.pythonize_name) - setattr(self, self.get_name(name), l) - return l - if name in self.item_marker: - e = Element(self.connection, name, parent=self, - list_marker=self.list_marker, - item_marker=self.item_marker, - pythonize_name=self.pythonize_name) - self.append(e) - return e - else: - return None - - def endElement(self, name, value, connection): - if name == self.element_name: - if len(self) > 0: - empty = [] - for e in self: - if isinstance(e, Element): - if len(e) == 0: - empty.append(e) - for e in empty: - self.remove(e) - else: - setattr(self, self.get_name(name), value) diff -Nru python-boto-2.20.1/boto/core/__init__.py python-boto-2.29.1/boto/core/__init__.py --- python-boto-2.20.1/boto/core/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ 
python-boto-2.29.1/boto/core/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# diff -Nru python-boto-2.20.1/boto/core/README python-boto-2.29.1/boto/core/README --- python-boto-2.20.1/boto/core/README 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/core/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -What's This All About? -====================== - -This directory contains the beginnings of what is hoped will be the -new core of boto. We want to move from using httplib to using -requests. We also want to offer full support for Python 2.6, 2.7, and -3.x. This is a pretty big change and will require some time to roll -out but this module provides a starting point. 
- -What you will find in this module: - -* auth.py provides a SigV2 authentication packages as a args hook for requests. -* credentials.py provides a way of finding AWS credentials (see below). -* dictresponse.py provides a generic response handler that parses XML responses - and returns them as nested Python data structures. -* service.py provides a simple example of a service that actually makes an EC2 - request and returns a response. - -Credentials -=========== - -Credentials are being handled a bit differently here. The following -describes the order of search for credentials: - -1. If your local environment for has ACCESS_KEY and SECRET_KEY variables - defined, these will be used. - -2. If your local environment has AWS_CREDENTIAL_FILE defined, it is assumed - that it will be a config file with entries like this: - - [default] - access_key = xxxxxxxxxxxxxxxx - sercret_key = xxxxxxxxxxxxxxxxxx - - [test] - access_key = yyyyyyyyyyyyyy - secret_key = yyyyyyyyyyyyyyyyyy - - Each section in the config file is called a persona and you can reference - a particular persona by name when instantiating a Service class. - -3. If a standard boto config file is found that contains credentials, those - will be used. - -4. If temporary credentials for an IAM Role are found in the instance - metadata of an EC2 instance, these credentials will be used. - -Trying Things Out -================= -To try this code out, cd to the directory containing the core module. - - >>> import core.service - >>> s = core.service.Service() - >>> s.describe_instances() - -This code should return a Python data structure containing information -about your currently running EC2 instances. This example should run in -Python 2.6.x, 2.7.x and Python 3.x. 
\ No newline at end of file diff -Nru python-boto-2.20.1/boto/core/service.py python-boto-2.29.1/boto/core/service.py --- python-boto-2.20.1/boto/core/service.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/core/service.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# - -import requests -from .auth import SigV2Auth -from .credentials import get_credentials -from .dictresponse import Element, XmlHandler - - -class Service(object): - """ - This is a simple example service that connects to the EC2 endpoint - and supports a single request (DescribeInstances) to show how to - use the requests-based code rather than the standard boto code which - is based on httplib. At the moment, the only auth mechanism - supported is SigV2. 
- """ - - def __init__(self, host='https://ec2.us-east-1.amazonaws.com', - path='/', api_version='2012-03-01', persona=None): - self.credentials = get_credentials(persona) - self.auth = SigV2Auth(self.credentials, api_version=api_version) - self.host = host - self.path = path - - def get_response(self, params, list_marker=None): - r = requests.post(self.host, params=params, - hooks={'args': self.auth.add_auth}) - r.encoding = 'utf-8' - body = r.text.encode('utf-8') - e = Element(list_marker=list_marker, pythonize_name=True) - h = XmlHandler(e, self) - h.parse(body) - return e - - def build_list_params(self, params, items, label): - if isinstance(items, str): - items = [items] - for i in range(1, len(items) + 1): - params['%s.%d' % (label, i)] = items[i - 1] - - def describe_instances(self, instance_ids=None): - params = {} - if instance_ids: - self.build_list_params(params, instance_ids, 'InstanceId') - return self.get_response(params) diff -Nru python-boto-2.20.1/boto/datapipeline/__init__.py python-boto-2.29.1/boto/datapipeline/__init__.py --- python-boto-2.20.1/boto/datapipeline/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/datapipeline/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the AWS Datapipeline service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.datapipeline.layer1 import DataPipelineConnection + return get_regions('datapipeline', connection_cls=DataPipelineConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.20.1/boto/datapipeline/layer1.py python-boto-2.29.1/boto/datapipeline/layer1.py --- python-boto-2.20.1/boto/datapipeline/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/datapipeline/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -85,12 +85,12 @@ def __init__(self, **kwargs): - region = kwargs.get('region') + region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(DataPipelineConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): diff -Nru python-boto-2.20.1/boto/directconnect/__init__.py python-boto-2.29.1/boto/directconnect/__init__.py --- python-boto-2.20.1/boto/directconnect/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/directconnect/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION 
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,32 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.directconnect.layer1 import DirectConnectConnection - - return [RegionInfo(name='us-east-1', - endpoint='directconnect.us-east-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='us-west-1', - endpoint='directconnect.us-west-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='us-west-2', - endpoint='directconnect.us-west-2.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='eu-west-1', - endpoint='directconnect.eu-west-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-1', - endpoint='directconnect.ap-southeast-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-2', - endpoint='directconnect.ap-southeast-2.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-3', - endpoint='directconnect.ap-southeast-3.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='sa-east-1', - endpoint='directconnect.sa-east-1.amazonaws.com', - connection_cls=DirectConnectConnection), - ] + return get_regions('directconnect', connection_cls=DirectConnectConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/directconnect/layer1.py python-boto-2.29.1/boto/directconnect/layer1.py --- python-boto-2.20.1/boto/directconnect/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/directconnect/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -79,7 +79,7 @@ if 'host' not in kwargs: kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(DirectConnectConnection, self).__init__(**kwargs) self.region = region def 
_required_auth_capability(self): diff -Nru python-boto-2.20.1/boto/dynamodb/__init__.py python-boto-2.29.1/boto/dynamodb/__init__.py --- python-boto-2.20.1/boto/dynamodb/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,34 +32,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.dynamodb.layer2 - return [RegionInfo(name='us-east-1', - endpoint='dynamodb.us-east-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-gov-west-1', - endpoint='dynamodb.us-gov-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-west-1', - endpoint='dynamodb.us-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-west-2', - endpoint='dynamodb.us-west-2.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-northeast-1', - endpoint='dynamodb.ap-northeast-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-southeast-1', - endpoint='dynamodb.ap-southeast-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-southeast-2', - endpoint='dynamodb.ap-southeast-2.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='eu-west-1', - endpoint='dynamodb.eu-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='sa-east-1', - endpoint='dynamodb.sa-east-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - ] + return get_regions('dynamodb', connection_cls=boto.dynamodb.layer2.Layer2) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/dynamodb/item.py python-boto-2.29.1/boto/dynamodb/item.py --- python-boto-2.20.1/boto/dynamodb/item.py 
2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb/item.py 2014-05-30 20:49:34.000000000 +0000 @@ -41,13 +41,13 @@ self._updates = None self._hash_key_name = self.table.schema.hash_key_name self._range_key_name = self.table.schema.range_key_name - if attrs == None: + if attrs is None: attrs = {} - if hash_key == None: + if hash_key is None: hash_key = attrs.get(self._hash_key_name, None) self[self._hash_key_name] = hash_key if self._range_key_name: - if range_key == None: + if range_key is None: range_key = attrs.get(self._range_key_name, None) self[self._range_key_name] = range_key self._updates = {} diff -Nru python-boto-2.20.1/boto/dynamodb/layer1.py python-boto-2.29.1/boto/dynamodb/layer1.py --- python-boto-2.20.1/boto/dynamodb/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -74,7 +74,7 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, - validate_certs=True, validate_checksums=True): + validate_certs=True, validate_checksums=True, profile_name=None): if not region: region_name = boto.config.get('DynamoDB', 'region', self.DefaultRegionName) @@ -84,12 +84,13 @@ break self.region = region - AWSAuthConnection.__init__(self, self.region.endpoint, + super(Layer1, self).__init__(self.region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, debug=debug, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.throughput_exceeded_events = 0 self._validate_checksums = boto.config.getbool( 'DynamoDB', 'validate_checksums', validate_checksums) diff -Nru python-boto-2.20.1/boto/dynamodb/layer2.py python-boto-2.29.1/boto/dynamodb/layer2.py --- python-boto-2.20.1/boto/dynamodb/layer2.py 2013-12-13 20:46:00.000000000 +0000 +++ 
python-boto-2.29.1/boto/dynamodb/layer2.py 2014-05-30 20:49:34.000000000 +0000 @@ -145,11 +145,13 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, - validate_certs=True, dynamizer=LossyFloatDynamizer): + validate_certs=True, dynamizer=LossyFloatDynamizer, + profile_name=None): self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, debug, security_token, region, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.dynamizer = dynamizer() def use_decimals(self): diff -Nru python-boto-2.20.1/boto/dynamodb/types.py python-boto-2.29.1/boto/dynamodb/types.py --- python-boto-2.20.1/boto/dynamodb/types.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb/types.py 2014-05-30 20:49:34.000000000 +0000 @@ -136,6 +136,9 @@ class Binary(object): def __init__(self, value): + if not isinstance(value, basestring): + raise TypeError('Value must be a string of binary data!') + self.value = value def encode(self): diff -Nru python-boto-2.20.1/boto/dynamodb2/exceptions.py python-boto-2.29.1/boto/dynamodb2/exceptions.py --- python-boto-2.20.1/boto/dynamodb2/exceptions.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/exceptions.py 2014-05-30 20:49:34.000000000 +0000 @@ -72,3 +72,7 @@ class QueryError(DynamoDBError): pass + + +class ItemNotFound(DynamoDBError): + pass diff -Nru python-boto-2.20.1/boto/dynamodb2/fields.py python-boto-2.29.1/boto/dynamodb2/fields.py --- python-boto-2.20.1/boto/dynamodb2/fields.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/fields.py 2014-05-30 20:49:34.000000000 +0000 @@ -91,10 +91,10 @@ class BaseIndexField(object): """ - An abstract class for defining schema fields. + An abstract class for defining schema indexes. 
- Contains most of the core functionality for the field. Subclasses must - define an ``attr_type`` to pass to DynamoDB. + Contains most of the core functionality for the index. Subclasses must + define a ``projection_type`` to pass to DynamoDB. """ def __init__(self, name, parts): self.name = name @@ -139,7 +139,7 @@ }, ], 'Projection': { - 'ProjectionType': 'KEYS_ONLY, + 'ProjectionType': 'KEYS_ONLY', } } @@ -210,3 +210,128 @@ schema_data = super(IncludeIndex, self).schema() schema_data['Projection']['NonKeyAttributes'] = self.includes_fields return schema_data + + +class GlobalBaseIndexField(BaseIndexField): + """ + An abstract class for defining global indexes. + + Contains most of the core functionality for the index. Subclasses must + define a ``projection_type`` to pass to DynamoDB. + """ + throughput = { + 'read': 5, + 'write': 5, + } + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + + if throughput is not None: + self.throughput = throughput + + super(GlobalBaseIndexField, self).__init__(*args, **kwargs) + + def schema(self): + """ + Returns the schema structure DynamoDB expects. + + Example:: + + >>> index.schema() + { + 'IndexName': 'LastNameIndex', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + } + + """ + schema_data = super(GlobalBaseIndexField, self).schema() + schema_data['ProvisionedThroughput'] = { + 'ReadCapacityUnits': int(self.throughput['read']), + 'WriteCapacityUnits': int(self.throughput['write']), + } + return schema_data + + +class GlobalAllIndex(GlobalBaseIndexField): + """ + An index signifying all fields should be in the index. + + Example:: + + >>> GlobalAllIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... 
}) + + """ + projection_type = 'ALL' + + +class GlobalKeysOnlyIndex(GlobalBaseIndexField): + """ + An index signifying only key fields should be in the index. + + Example:: + + >>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'KEYS_ONLY' + + +class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex): + """ + An index signifying only certain fields should be in the index. + + Example:: + + >>> GlobalIncludeIndex('GenderIndex', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... includes=['gender'], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'INCLUDE' + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + IncludeIndex.__init__(self, *args, **kwargs) + if throughput: + kwargs['throughput'] = throughput + GlobalBaseIndexField.__init__(self, *args, **kwargs) + + def schema(self): + # Pick up the includes. + schema_data = IncludeIndex.schema(self) + # Also the throughput. + schema_data.update(GlobalBaseIndexField.schema(self)) + return schema_data diff -Nru python-boto-2.20.1/boto/dynamodb2/__init__.py python-boto-2.29.1/boto/dynamodb2/__init__.py --- python-boto-2.20.1/boto/dynamodb2/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,34 +32,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.dynamodb2.layer1 import DynamoDBConnection - return [RegionInfo(name='us-east-1', - endpoint='dynamodb.us-east-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-gov-west-1', - endpoint='dynamodb.us-gov-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-west-1', - endpoint='dynamodb.us-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-west-2', - endpoint='dynamodb.us-west-2.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='eu-west-1', - endpoint='dynamodb.eu-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-northeast-1', - endpoint='dynamodb.ap-northeast-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-southeast-1', - endpoint='dynamodb.ap-southeast-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-southeast-2', - endpoint='dynamodb.ap-southeast-2.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='sa-east-1', - endpoint='dynamodb.sa-east-1.amazonaws.com', - connection_cls=DynamoDBConnection), - ] + return get_regions('dynamodb', connection_cls=DynamoDBConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/dynamodb2/layer1.py python-boto-2.29.1/boto/dynamodb2/layer1.py --- python-boto-2.20.1/boto/dynamodb2/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -37,7 +37,112 @@ """ Amazon DynamoDB **Overview** This is the Amazon DynamoDB API Reference. This guide provides - descriptions and samples of the Amazon DynamoDB API. + descriptions and samples of the low-level DynamoDB API. For + information about DynamoDB application development, go to the + `Amazon DynamoDB Developer Guide`_. + + Instead of making the requests to the low-level DynamoDB API + directly from your application, we recommend that you use the AWS + Software Development Kits (SDKs). The easy-to-use libraries in the + AWS SDKs make it unnecessary to call the low-level DynamoDB API + directly from your application. The libraries take care of request + authentication, serialization, and connection management. For more + information, go to `Using the AWS SDKs with DynamoDB`_ in the + Amazon DynamoDB Developer Guide . + + If you decide to code against the low-level DynamoDB API directly, + you will need to write the necessary code to authenticate your + requests. For more information on signing your requests, go to + `Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide . + + The following are short descriptions of each low-level API action, + organized by function. + + **Managing Tables** + + + + + CreateTable - Creates a table with user-specified provisioned + throughput settings. You must designate one attribute as the hash + primary key for the table; you can optionally designate a second + attribute as the range primary key. DynamoDB creates indexes on + these key attributes for fast data access. Optionally, you can + create one or more secondary indexes, which provide fast data + access using non-key attributes. + + DescribeTable - Returns metadata for a table, such as table + size, status, and index information. 
+ + UpdateTable - Modifies the provisioned throughput settings for a + table. Optionally, you can modify the provisioned throughput + settings for global secondary indexes on the table. + + ListTables - Returns a list of all tables associated with the + current AWS account and endpoint. + + DeleteTable - Deletes a table and all of its indexes. + + + + For conceptual information about managing tables, go to `Working + with Tables`_ in the Amazon DynamoDB Developer Guide . + + **Reading Data** + + + + + GetItem - Returns a set of attributes for the item that has a + given primary key. By default, GetItem performs an eventually + consistent read; however, applications can specify a strongly + consistent read instead. + + BatchGetItem - Performs multiple GetItem requests for data items + using their primary keys, from one table or multiple tables. The + response from BatchGetItem has a size limit of 1 MB and returns a + maximum of 100 items. Both eventually consistent and strongly + consistent reads can be used. + + Query - Returns one or more items from a table or a secondary + index. You must provide a specific hash key value. You can narrow + the scope of the query using comparison operators against a range + key value, or on the index key. Query supports either eventual or + strong consistency. A single response has a size limit of 1 MB. + + Scan - Reads every item in a table; the result set is eventually + consistent. You can limit the number of items returned by + filtering the data attributes, using conditional expressions. Scan + can be used to enable ad-hoc querying of a table against non-key + attributes; however, since this is a full table scan without using + an index, Scan should not be used for any application query use + case that requires predictable performance. + + + + For conceptual information about reading data, go to `Working with + Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB + Developer Guide . 
+ + **Modifying Data** + + + + + PutItem - Creates a new item, or replaces an existing item with + a new item (including all the attributes). By default, if an item + in the table already exists with the same primary key, the new + item completely replaces the existing item. You can use + conditional operators to replace an item only if its attribute + values match certain conditions, or to insert a new item only if + that item doesn't already exist. + + UpdateItem - Modifies the attributes of an existing item. You + can also use conditional operators to perform an update only if + the item's attribute values match certain conditions. + + DeleteItem - Deletes an item in a table by primary key. You can + use conditional operators to perform a delete an item only if the + item's attribute values match certain conditions. + + BatchWriteItem - Performs multiple PutItem and DeleteItem + requests across multiple tables in a single request. A failure of + any request(s) in the batch will not cause the entire + BatchWriteItem operation to fail. Supports batches of up to 25 + items to put or delete, with a maximum total request size of 1 MB. + + + + For conceptual information about modifying data, go to `Working + with Items`_ and `Query and Scan Operations`_ in the Amazon + DynamoDB Developer Guide . """ APIVersion = "2012-08-10" DefaultRegionName = "us-east-1" @@ -75,7 +180,7 @@ if 'host' not in kwargs: kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(DynamoDBConnection, self).__init__(**kwargs) self.region = region self._validate_checksums = boto.config.getbool( 'DynamoDB', 'validate_checksums', validate_checksums) @@ -91,7 +196,7 @@ items by primary key. A single operation can retrieve up to 1 MB of data, which can - comprise as many as 100 items. BatchGetItem will return a + contain as many as 100 items. 
BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, @@ -106,24 +211,38 @@ include its own logic to assemble the pages of results into one dataset. - If no items can be processed because of insufficient - provisioned throughput on each of the tables involved in the - request, BatchGetItem throws - ProvisionedThroughputExceededException . + If none of the items can be processed due to insufficient + provisioned throughput on all of the tables in the request, + then BatchGetItem will throw a + ProvisionedThroughputExceededException . If at least one of + the items is successfully processed, then BatchGetItem + completes successfully, while returning the keys of the unread + items in UnprocessedKeys . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to `True` for any or all tables. - In order to minimize response latency, BatchGetItem fetches + In order to minimize response latency, BatchGetItem retrieves items in parallel. - When designing your application, keep in mind that Amazon - DynamoDB does not return attributes in any particular order. 
- To help parse the response by item, include the primary key - values for the items in your request in the AttributesToGet - parameter. + When designing your application, keep in mind that DynamoDB + does not return attributes in any particular order. To help + parse the response by item, include the primary key values for + the items in your request in the AttributesToGet parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum @@ -141,17 +260,27 @@ + Keys - An array of primary key attribute values that define specific - items in the table. + items in the table. For each primary key, you must provide all of + the key attributes. For example, with a hash type primary key, you + only need to specify the hash attribute. For a hash-and-range type + primary key, you must specify both the hash attribute and the range + attribute. + AttributesToGet - One or more attributes to be retrieved from the - table or index. By default, all attributes are returned. If a - specified attribute is not found, it does not appear in the result. + table. By default, all attributes are returned. If a specified + attribute is not found, it does not appear in the result. Note that + AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + ConsistentRead - If `True`, a strongly consistent read is used; if `False` (the default), an eventually consistent read is used. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. 
If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ params = {'RequestItems': request_items, } @@ -183,27 +312,39 @@ unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. - To write one item, you can use the PutItem operation; to - delete one item, you can use the DeleteItem operation. + Note that if none of the items can be processed due to + insufficient provisioned throughput on all of the tables in + the request, then BatchGetItem will throw a + ProvisionedThroughputExceededException . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide. With BatchWriteItem , you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce - (EMR), or copy data from another database into Amazon - DynamoDB. In order to improve performance with these large- - scale operations, BatchWriteItem does not behave in the same - way as individual PutItem and DeleteItem calls would For - example, you cannot specify conditions on individual put and - delete requests, and BatchWriteItem does not return deleted - items in the response. + (EMR), or copy data from another database into DynamoDB. 
In + order to improve performance with these large-scale + operations, BatchWriteItem does not behave in the same way as + individual PutItem and DeleteItem calls would For example, you + cannot specify conditions on individual put and delete + requests, and BatchWriteItem does not return deleted items in + the response. If you use a programming language that supports concurrency, such as Java, you can use threads to write items in parallel. Your application must include the necessary logic to manage - the threads. - - With languages that don't support threading, such as PHP, - BatchWriteItem will write or delete the specified items one at - a time. In both situations, BatchWriteItem provides an + the threads. With languages that don't support threading, such + as PHP, you must update or delete the specified items one at a + time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity @@ -215,8 +356,8 @@ operations on nonexistent items consume one write capacity unit. - If one or more of the following is true, Amazon DynamoDB - rejects the entire batch write operation: + If one or more of the following is true, DynamoDB rejects the + entire batch write operation: + One or more tables specified in the BatchWriteItem request @@ -241,8 +382,12 @@ The item to be deleted is identified by a Key subelement: + Key - A map of primary key attribute values that uniquely identify - the item. Each entry in this map consists of an attribute name and - an attribute value. + the ! item. Each entry in this map consists of an attribute name + and an attribute value. For each primary key, you must provide all + of the key attributes. For example, with a hash type primary key, + you only need to specify the hash attribute. 
For a hash-and-range + type primary key, you must specify both the hash attribute and the + range attribute. + PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement: @@ -257,15 +402,17 @@ match those of the schema in the table's attribute definition. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. """ params = {'RequestItems': request_items, } @@ -286,16 +433,15 @@ the tables in different regions. CreateTable is an asynchronous operation. Upon receiving a - CreateTable request, Amazon DynamoDB immediately returns a - response with a TableStatus of `CREATING`. After the table is - created, Amazon DynamoDB sets the TableStatus to `ACTIVE`. You - can perform read and write operations only on an `ACTIVE` - table. - - If you want to create multiple tables with local secondary - indexes on them, you must create them sequentially. Only one - table with local secondary indexes can be in the `CREATING` - state at any given time. + CreateTable request, DynamoDB immediately returns a response + with a TableStatus of `CREATING`. After the table is created, + DynamoDB sets the TableStatus to `ACTIVE`. 
You can perform + read and write operations only on an `ACTIVE` table. + + If you want to create multiple tables with secondary indexes + on them, you must create them sequentially. Only one table + with secondary indexes can be in the `CREATING` state at any + given time. You can use the DescribeTable API to check the table status. @@ -308,9 +454,9 @@ :type key_schema: list :param key_schema: Specifies the attributes that make up the primary - key for the table. The attributes in KeySchema must also be defined - in the AttributeDefinitions array. For more information, see `Data - Model`_ in the Amazon DynamoDB Developer Guide. + key for a table or an index. The attributes in KeySchema must also + be defined in the AttributeDefinitions array. For more information, + see `Data Model`_ in the Amazon DynamoDB Developer Guide. Each KeySchemaElement in the array is composed of: @@ -331,18 +477,19 @@ :type local_secondary_indexes: list :param local_secondary_indexes: - One or more secondary indexes (the maximum is five) to be created on - the table. Each index is scoped to a given hash key value. There is - a 10 gigabyte size limit per hash key; otherwise, the size of a - local secondary index is unconstrained. - - Each secondary index in the array includes the following: + One or more local secondary indexes (the maximum is five) to be created + on the table. Each index is scoped to a given hash key value. There + is a 10 GB size limit per hash key; otherwise, the size of a local + secondary index is unconstrained. + + Each local secondary index in the array includes the following: - + IndexName - The name of the secondary index. Must be unique only for - this table. - + KeySchema - Specifies the key schema for the index. The key schema - must begin with the same hash key attribute as the table. + + IndexName - The name of the local secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the local secondary index. 
+ The key schema must begin with the same hash key attribute as the + table. + Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically @@ -358,19 +505,51 @@ + `ALL` - All of the table attributes are projected into the index. + NonKeyAttributes - A list of one or more non-key attribute names that - are projected into the index. The total count of attributes - specified in NonKeyAttributes , summed across all of the local + are projected into the secondary index. The total count of + attributes specified in NonKeyAttributes , summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. :type global_secondary_indexes: list :param global_secondary_indexes: + One or more global secondary indexes (the maximum is five) to be + created on the table. Each global secondary index in the array + includes the following: + + + + IndexName - The name of the global secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the global secondary index. + + Projection - Specifies attributes that are copied (projected) from + the table into the index. These are in addition to the primary key + attributes and index key attributes, which are automatically + projected. Each attribute specification is composed of: + + + ProjectionType - One of the following: + + + `KEYS_ONLY` - Only the index and primary keys are projected into the + index. + + `INCLUDE` - Only the specified table attributes are projected into + the index. The list of projected attributes are in NonKeyAttributes + . + + `ALL` - All of the table attributes are projected into the index. + + + NonKeyAttributes - A list of one or more non-key attribute names that + are projected into the secondary index. 
The total count of + attributes specified in NonKeyAttributes , summed across all of the + secondary indexes, must not exceed 20. If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + + ProvisionedThroughput - The provisioned throughput settings for the + global secondary index, consisting of read and write capacity + units. :type provisioned_throughput: dict - :param provisioned_throughput: The provisioned throughput settings for - the specified table. The settings can be modified using the - UpdateTable operation. + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. For current minimum and maximum provisioned throughput values, see `Limits`_ in the Amazon DynamoDB Developer Guide. @@ -388,7 +567,8 @@ return self.make_request(action='CreateTable', body=json.dumps(params)) - def delete_item(self, table_name, key, expected=None, return_values=None, + def delete_item(self, table_name, key, expected=None, + conditional_operator=None, return_values=None, return_consumed_capacity=None, return_item_collection_metrics=None): """ @@ -406,8 +586,8 @@ Conditional deletes are useful for only deleting items if specific conditions are met. If those conditions are met, - Amazon DynamoDB performs the delete. Otherwise, the item is - not deleted. + DynamoDB performs the delete. Otherwise, the item is not + deleted. :type table_name: string :param table_name: The name of the table from which to delete the item. @@ -415,50 +595,181 @@ :type key: map :param key: A map of attribute names to AttributeValue objects, representing the primary key of the item to delete. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. 
For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the DeleteItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . (You cannot expect - an attribute to have a value, while also expecting it not to - exist.) + :param expected: + A map of attribute/condition pairs. 
This is the conditional block for + the DeleteItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. 
The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
  • + + `NE` : Not equal. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. + + `NULL` : The attribute does not exist. + + `CONTAINS` : checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue of type + String, Number, or Binary (not a set). If the target attribute of + the comparison is a String, then the operation checks for a + substring match. If the target attribute of the comparison is + Binary, then the operation looks for a subsequence of the target + that matches the input. If the target attribute of the comparison + is a set ("SS", "NS", or "BS"), then the operation checks for a + member of the set (not as a substring). + + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If + the target attribute of the comparison is a String, then the + operation checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operation + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set + ("SS", "NS", or "BS"), then the operation checks for the absence of + a member of the set (not as a substring). + + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  • + + `IN` : checks for exact matches. AttributeValueList can contain more + than one AttributeValue of type String, Number, or Binary (not a + set). The target attribute of the comparison must be of the same + type and exact value to match. A String never matches a String set. + + `BETWEEN` : Greater than or equal to the first value, and less than + or equal to the second value. AttributeValueList must contain two + AttributeValue elements of the same type, either String, Number, or + Binary (not a set). A target attribute matches if the target value + is greater than, or equal to, the first element and less than, or + equal to, the second element. If an item contains an AttributeValue + of a different type than the one specified in the request, the + value does not match. For example, `{"S":"6"}` does not compare to + `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", + "2", "1"]}` - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + + For backward compatibility with previous DynamoDB releases, the + following parameters can be used instead of AttributeValueList and + ComparisonOperator : + + + + Value - A value for DynamoDB to compare with an attribute. + + Exists - Causes DynamoDB to evaluate the value before attempting the + conditional operation: + + + If Exists is `True`, DynamoDB will check to see if that attribute + value already exists in the table. If it is found, then the + condition evaluates to true; otherwise the condition evaluate to + false. + + If Exists is `False`, DynamoDB assumes that the attribute value does + not exist in the table. 
If in fact the value does not exist, then + the assumption is valid and the condition evaluates to true. If the + value is found, despite the assumption that it does not exist, the + condition evaluates to false. + + + + Even though DynamoDB continues to accept the Value and Exists + parameters, they are now deprecated. We recommend that you use + AttributeValueList and ComparisonOperator instead, since they allow + you to construct a much wider range of conditions. + + The Value and Exists parameters are incompatible with + AttributeValueList and ComparisonOperator . If you attempt to use + both sets of parameters at once, DynamoDB will throw a + ValidationException . + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the Expected map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. :type return_values: string :param return_values: @@ -472,20 +783,24 @@ + `ALL_OLD` - The content of the old item is returned. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. 
If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. """ params = {'TableName': table_name, 'Key': key, } if expected is not None: params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if return_values is not None: params['ReturnValues'] = return_values if return_consumed_capacity is not None: @@ -499,20 +814,20 @@ """ The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in - the `DELETING` state until Amazon DynamoDB completes the - deletion. If the table is in the `ACTIVE` state, you can - delete it. If a table is in `CREATING` or `UPDATING` states, - then Amazon DynamoDB returns a ResourceInUseException . If the - specified table does not exist, Amazon DynamoDB returns a - ResourceNotFoundException . If table is already in the - `DELETING` state, no error is returned. + the `DELETING` state until DynamoDB completes the deletion. If + the table is in the `ACTIVE` state, you can delete it. If a + table is in `CREATING` or `UPDATING` states, then DynamoDB + returns a ResourceInUseException . If the specified table does + not exist, DynamoDB returns a ResourceNotFoundException . If + table is already in the `DELETING` state, no error is + returned. - Amazon DynamoDB might continue to accept data read and write + DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem , on a table in the `DELETING` state until the table deletion is complete. - When you delete a table, any local secondary indexes on that - table are also deleted. + When you delete a table, any indexes on that table are also + deleted. Use the DescribeTable API to check the status of the table. @@ -557,12 +872,20 @@ :type key: map :param key: A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve. 
+ For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type attributes_to_get: list :param attributes_to_get: The names of one or more attributes to retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. :type consistent_read: boolean :param consistent_read: If set to `True`, then the operation uses @@ -570,9 +893,11 @@ are used. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ params = {'TableName': table_name, 'Key': key, } @@ -587,17 +912,19 @@ def list_tables(self, exclusive_start_table_name=None, limit=None): """ - Returns an array of all the tables associated with the current - account and endpoint. + Returns an array of table names associated with the current + account and endpoint. The output from ListTables is paginated, + with each page returning a maximum of 100 table names. :type exclusive_start_table_name: string - :param exclusive_start_table_name: The name of the table that starts - the list. 
If you already ran a ListTables operation and received a - LastEvaluatedTableName value in the response, use that value here - to continue the list. + :param exclusive_start_table_name: The first table name that this + operation will evaluate. Use the value that was returned for + LastEvaluatedTableName in a previous operation, so that you can + obtain the next page of results. :type limit: integer - :param limit: A maximum number of table names to return. + :param limit: A maximum number of table names to return. If this + parameter is not specified, the limit is 100. """ params = {} @@ -610,7 +937,8 @@ def put_item(self, table_name, item, expected=None, return_values=None, return_consumed_capacity=None, - return_item_collection_metrics=None): + return_item_collection_metrics=None, + conditional_operator=None): """ Creates a new item, or replaces an old item with a new item. If an item already exists in the specified table with the same @@ -635,8 +963,8 @@ description. To prevent a new item from replacing an existing item, use a - conditional put operation with Exists set to `False` for the - primary key attribute, or attributes. + conditional put operation with ComparisonOperator set to + `NULL` for the primary key attribute, or attributes. For more information about using this API, see `Working with Items`_ in the Amazon DynamoDB Developer Guide. @@ -648,6 +976,11 @@ :param item: A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. + You must provide all of the attributes for the primary key. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. 
+ If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition. @@ -658,48 +991,161 @@ Each element in the Item map is an AttributeValue object. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the PutItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . 
(You cannot expect - an attribute to have a value, while also expecting it not to - exist.) + :param expected: + A map of attribute/condition pairs. This is the conditional block for + the PutItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. 
+ + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
  • + + `NE` : Not equal. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. + + `NULL` : The attribute does not exist. + + `CONTAINS` : checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue of type + String, Number, or Binary (not a set). If the target attribute of + the comparison is a String, then the operation checks for a + substring match. If the target attribute of the comparison is + Binary, then the operation looks for a subsequence of the target + that matches the input. If the target attribute of the comparison + is a set ("SS", "NS", or "BS"), then the operation checks for a + member of the set (not as a substring). + + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If + the target attribute of the comparison is a String, then the + operation checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operation + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set + ("SS", "NS", or "BS"), then the operation checks for the absence of + a member of the set (not as a substring). + + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  • + + `IN` : checks for exact matches. AttributeValueList can contain more + than one AttributeValue of type String, Number, or Binary (not a + set). The target attribute of the comparison must be of the same + type and exact value to match. A String never matches a String set. + + `BETWEEN` : Greater than or equal to the first value, and less than + or equal to the second value. AttributeValueList must contain two + AttributeValue elements of the same type, either String, Number, or + Binary (not a set). A target attribute matches if the target value + is greater than, or equal to, the first element and less than, or + equal to, the second element. If an item contains an AttributeValue + of a different type than the one specified in the request, the + value does not match. For example, `{"S":"6"}` does not compare to + `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", + "2", "1"]}` + + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + + For backward compatibility with previous DynamoDB releases, the + following parameters can be used instead of AttributeValueList and + ComparisonOperator : + + + + Value - A value for DynamoDB to compare with an attribute. + + Exists - Causes DynamoDB to evaluate the value before attempting the + conditional operation: + + + If Exists is `True`, DynamoDB will check to see if that attribute + value already exists in the table. If it is found, then the + condition evaluates to true; otherwise the condition evaluate to + false. + + If Exists is `False`, DynamoDB assumes that the attribute value does + not exist in the table. If in fact the value does not exist, then + the assumption is valid and the condition evaluates to true. If the + value is found, despite the assumption that it does not exist, the + condition evaluates to false. 
+ + + + Even though DynamoDB continues to accept the Value and Exists + parameters, they are now deprecated. We recommend that you use + AttributeValueList and ComparisonOperator instead, since they allow + you to construct a much wider range of conditions. + + The Value and Exists parameters are incompatible with + AttributeValueList and ComparisonOperator . If you attempt to use + both sets of parameters at once, DynamoDB will throw a + ValidationException . :type return_values: string :param return_values: @@ -714,15 +1160,31 @@ the content of the old item is returned. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the Expected map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. 
""" params = {'TableName': table_name, 'Item': item, } @@ -734,13 +1196,16 @@ params['ReturnConsumedCapacity'] = return_consumed_capacity if return_item_collection_metrics is not None: params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator return self.make_request(action='PutItem', body=json.dumps(params)) - def query(self, table_name, index_name=None, select=None, + def query(self, table_name, key_conditions, index_name=None, select=None, attributes_to_get=None, limit=None, consistent_read=None, - key_conditions=None, scan_index_forward=None, - exclusive_start_key=None, return_consumed_capacity=None): + query_filter=None, conditional_operator=None, + scan_index_forward=None, exclusive_start_key=None, + return_consumed_capacity=None): """ A Query operation directly accesses items from a table using the table primary key, or from an index using the index key. @@ -761,15 +1226,20 @@ and a LastEvaluatedKey . The LastEvaluatedKey is only provided if the results exceed 1 MB, or if you have used Limit . - To request a strongly consistent result, set ConsistentRead to - true. + You can query a table, a local secondary index, or a global + secondary index. For a query on a table or on a local + secondary index, you can set ConsistentRead to true and obtain + a strongly consistent result. Global secondary indexes support + eventually consistent reads only, so do not specify + ConsistentRead when querying a global secondary index. :type table_name: string :param table_name: The name of the table containing the requested items. :type index_name: string - :param index_name: The name of an index on the table to query. + :param index_name: The name of an index to query. This can be any local + secondary index or global secondary index on the table. :type select: string :param select: The attributes to be returned in the result. 
You can @@ -777,31 +1247,35 @@ of matching items, or in the case of an index, some or all of the attributes projected into the index. - + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table, - this is the default. For an index, this mode causes Amazon DynamoDB - to fetch the full item from the table for each matching item in the - index. If the index is configured to project all item attributes, - the matching items will not be fetched from the table. Fetching - items from the table incurs additional throughput cost and latency. + + `ALL_ATTRIBUTES`: Returns all of the item attributes from the + specified table or index. If you are querying a local secondary + index, then for each matching item in the index DynamoDB will fetch + the entire item from the parent table. If the index is configured + to project all item attributes, then all of the data can be + obtained from the local secondary index, and no fetching is + required.. + `ALL_PROJECTED_ATTRIBUTES`: Allowed only when querying an index. Retrieves all attributes which have been projected into the index. If the index is configured to project all attributes, this is - equivalent to specifying ALL_ATTRIBUTES . + equivalent to specifying `ALL_ATTRIBUTES`. + `COUNT`: Returns the number of matching items, rather than the matching items themselves. + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in AttributesToGet . This is equivalent to specifying AttributesToGet - without specifying any value for Select . If you are querying an - index and request only attributes that are projected into that - index, the operation will read only the index and not the table. If - any of the requested attributes are not projected into the index, - Amazon DynamoDB will need to fetch each matching item from the - table. This extra fetching incurs additional throughput cost and - latency. + without specifying any value for Select . 
If you are querying a + local secondary index and request only attributes that are + projected into that index, the operation will read only the index + and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. If you are querying + a global secondary index, you can only request attributes that are + projected into the index. Global secondary index queries cannot + fetch attributes from the parent table. - When neither Select nor AttributesToGet are specified, Amazon DynamoDB - defaults to `ALL_ATTRIBUTES` when accessing a table, and + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES` when accessing a table, and `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage @@ -813,75 +1287,87 @@ retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. - If you are querying an index and request only attributes that are - projected into that index, the operation will read only the index - and not the table. If any of the requested attributes are not - projected into the index, Amazon DynamoDB will need to fetch each - matching item from the table. This extra fetching incurs additional - throughput cost and latency. + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. You cannot use both AttributesToGet and Select together in a Query request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. 
(This usage is equivalent to specifying AttributesToGet without any value for Select .) + If you are querying a local secondary index and request only attributes + that are projected into that index, the operation will read only + the index and not the table. If any of the requested attributes are + not projected into the local secondary index, DynamoDB will fetch + each of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. + + If you are querying a global secondary index, you can only request + attributes that are projected into the index. Global secondary + index queries cannot fetch attributes from the parent table. + :type limit: integer :param limit: The maximum number of items to evaluate (not necessarily - the number of matching items). If Amazon DynamoDB processes the - number of items up to the limit while processing the results, it - stops the operation and returns the matching values up to that - point, and a LastEvaluatedKey to apply in a subsequent operation, - so that you can pick up where you left off. Also, if the processed - data set size exceeds 1 MB before Amazon DynamoDB reaches this - limit, it stops the operation and returns the matching values up to - the limit, and a LastEvaluatedKey to apply in a subsequent - operation to continue the operation. For more information see - `Query and Scan`_ in the Amazon DynamoDB Developer Guide. + the number of matching items). If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + LastEvaluatedKey to apply in a subsequent operation, so that you + can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + LastEvaluatedKey to apply in a subsequent operation to continue the + operation. 
For more information, see `Query and Scan`_ in the + Amazon DynamoDB Developer Guide. :type consistent_read: boolean :param consistent_read: If set to `True`, then the operation uses strongly consistent reads; otherwise, eventually consistent reads are used. + Strongly consistent reads are not supported on global secondary + indexes. If you query a global secondary index with ConsistentRead + set to `True`, you will receive an error message. :type key_conditions: map - :param key_conditions: - The selection criteria for the query. - + :param key_conditions: The selection criteria for the query. For a query on a table, you can only have conditions on the table primary key attributes. You must specify the hash key attribute name and value as an `EQ` condition. You can optionally specify a second condition, referring to the range key attribute. - For a query on a secondary index, you can only have conditions on the - index key attributes. You must specify the index hash attribute - name and value as an EQ condition. You can optionally specify a - second condition, referring to the index key range attribute. - - Multiple conditions are evaluated using "AND"; in other words, all of - the conditions must be met in order for an item to appear in the - results results. + For a query on an index, you can only have conditions on the index key + attributes. You must specify the index hash attribute name and + value as an EQ condition. You can optionally specify a second + condition, referring to the index key range attribute. + + If you specify more than one condition in the KeyConditions map, then + by default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) 
Each KeyConditions element consists of an attribute name to compare, along with the following: + AttributeValueList - One or more values to evaluate against the - supplied attribute. This list contains exactly one value, except - for a `BETWEEN` or `IN` comparison, in which case the list contains - two values. For type Number, value comparisons are numeric. String - value comparisons for greater than, equals, or less than are based - on ASCII character code values. For example, `a` is greater than - `A`, and `aa` is greater than `B`. For a list of code values, see + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. - For Binary, Amazon DynamoDB treats each byte of the binary data as + For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when evaluating query expressions. + ComparisonOperator - A comparator for evaluating attributes. For - example, equals, greater than, less than, etc. Valid comparison - operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH | - BETWEEN` For information on specifying data types in JSON, see - `JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The - following are descriptions of each comparison operator. + example, equals, greater than, less than, etc. For KeyConditions , + only the following comparison operators are supported: `EQ | LE | + LT | GE | GT | BEGINS_WITH | BETWEEN` The following are + descriptions of these comparison operators. + `EQ` : Equal. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set). 
If an item contains @@ -890,33 +1376,33 @@ not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", "1"]}`. + `LE` : Less than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + `LT` : Less than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + `GE` : Greater than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + `GT` : Greater than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain - only one AttributeValue of type String or Binary (not a Number or a - set). The target attribute of the comparison must be a String or - Binary (not a Number or a set). + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  • + `BETWEEN` : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or @@ -928,14 +1414,68 @@ `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}` + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + :type query_filter: map + :param query_filter: + Evaluates the query results and returns only the desired values. + + If you specify more than one condition in the QueryFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + Each QueryFilter element consists of an attribute name to compare, + along with the following: + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. For information on specifying data + types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB + Developer Guide. + + ComparisonOperator - A comparator for evaluating attributes. For + example, equals, greater than, less than, etc. 
The following + comparison operators are available: `EQ | NE | LE | LT | GE | GT | + NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | + BETWEEN` For complete descriptions of all comparison operators, see + `API_Condition.html`_. + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the QueryFilter map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. + :type scan_index_forward: boolean :param scan_index_forward: Specifies ascending (true) or descending - (false) traversal of the index. Amazon DynamoDB returns results - reflecting the requested order determined by the range key. If the - data type is Number, the results are returned in numeric order. For - String, the results are returned in order of ASCII character code - values. For Binary, Amazon DynamoDB treats each byte of the binary - data as unsigned when it compares binary values. + (false) traversal of the index. DynamoDB returns results reflecting + the requested order determined by the range key. If the data type + is Number, the results are returned in numeric order. For String, + the results are returned in order of ASCII character code values. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values. If ScanIndexForward is not specified, the results are returned in ascending order. @@ -947,12 +1487,17 @@ No set data types are allowed. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. 
+ :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ - params = {'TableName': table_name, } + params = { + 'TableName': table_name, + 'KeyConditions': key_conditions, + } if index_name is not None: params['IndexName'] = index_name if select is not None: @@ -963,8 +1508,10 @@ params['Limit'] = limit if consistent_read is not None: params['ConsistentRead'] = consistent_read - if key_conditions is not None: - params['KeyConditions'] = key_conditions + if query_filter is not None: + params['QueryFilter'] = query_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if scan_index_forward is not None: params['ScanIndexForward'] = scan_index_forward if exclusive_start_key is not None: @@ -975,14 +1522,13 @@ body=json.dumps(params)) def scan(self, table_name, attributes_to_get=None, limit=None, - select=None, scan_filter=None, exclusive_start_key=None, - return_consumed_capacity=None, total_segments=None, - segment=None): + select=None, scan_filter=None, conditional_operator=None, + exclusive_start_key=None, return_consumed_capacity=None, + total_segments=None, segment=None): """ The Scan operation returns one or more items and item attributes by accessing every item in the table. To have - Amazon DynamoDB return fewer items, you can provide a - ScanFilter . + DynamoDB return fewer items, you can provide a ScanFilter . If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are @@ -1008,160 +1554,91 @@ retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. 
+ Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. :type limit: integer :param limit: The maximum number of items to evaluate (not necessarily - the number of matching items). If Amazon DynamoDB processes the - number of items up to the limit while processing the results, it - stops the operation and returns the matching values up to that - point, and a LastEvaluatedKey to apply in a subsequent operation, - so that you can pick up where you left off. Also, if the processed - data set size exceeds 1 MB before Amazon DynamoDB reaches this - limit, it stops the operation and returns the matching values up to - the limit, and a LastEvaluatedKey to apply in a subsequent - operation to continue the operation. For more information see - `Query and Scan`_ in the Amazon DynamoDB Developer Guide. + the number of matching items). If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + LastEvaluatedKey to apply in a subsequent operation, so that you + can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + LastEvaluatedKey to apply in a subsequent operation to continue the + operation. For more information, see `Query and Scan`_ in the + Amazon DynamoDB Developer Guide. :type select: string :param select: The attributes to be returned in the result. You can - retrieve all item attributes, specific item attributes, the count - of matching items, or in the case of an index, some or all of the - attributes projected into the index. + retrieve all item attributes, specific item attributes, or the + count of matching items. 
- + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table, - this is the default. For an index, this mode causes Amazon DynamoDB - to fetch the full item from the table for each matching item in the - index. If the index is configured to project all item attributes, - the matching items will not be fetched from the table. Fetching - items from the table incurs additional throughput cost and latency. - + `ALL_PROJECTED_ATTRIBUTES`: Retrieves all attributes which have been - projected into the index. If the index is configured to project all - attributes, this is equivalent to specifying ALL_ATTRIBUTES . + + `ALL_ATTRIBUTES`: Returns all of the item attributes. + `COUNT`: Returns the number of matching items, rather than the matching items themselves. + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in AttributesToGet . This is equivalent to specifying AttributesToGet - without specifying any value for Select . If you are querying an - index and request only attributes that are projected into that - index, the operation will read only the index and not the table. If - any of the requested attributes are not projected into the index, - Amazon DynamoDB will need to fetch each matching item from the - table. This extra fetching incurs additional throughput cost and - latency. + without specifying any value for Select . - When neither Select nor AttributesToGet are specified, Amazon DynamoDB - defaults to `ALL_ATTRIBUTES` when accessing a table, and - `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use - both Select and AttributesToGet together in a single request, - unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage - is equivalent to specifying AttributesToGet without any value for - Select .) + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES`. 
You cannot use both Select and AttributesToGet + together in a single request, unless the value for Select is + `SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying + AttributesToGet without any value for Select .) :type scan_filter: map :param scan_filter: Evaluates the scan results and returns only the desired values. - Multiple conditions are treated as "AND" operations: all conditions - must be met to be included in the results. - Each ScanConditions element consists of an attribute name to compare, - along with the following: + If you specify more than one condition in the ScanFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + Each ScanFilter element consists of an attribute name to compare, along + with the following: + AttributeValueList - One or more values to evaluate against the - supplied attribute. This list contains exactly one value, except - for a `BETWEEN` or `IN` comparison, in which case the list contains - two values. For type Number, value comparisons are numeric. String - value comparisons for greater than, equals, or less than are based - on ASCII character code values. For example, `a` is greater than - `A`, and `aa` is greater than `B`. For a list of code values, see + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. 
- For Binary, Amazon DynamoDB treats each byte of the binary data as + For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when - evaluating query expressions. + evaluating query expressions. For information on specifying data + types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB + Developer Guide. + ComparisonOperator - A comparator for evaluating attributes. For - example, equals, greater than, less than, etc. Valid comparison - operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL - | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For - information on specifying data types in JSON, see `JSON Data - Format`_ in the Amazon DynamoDB Developer Guide. The following are - descriptions of each comparison operator. + example, equals, greater than, less than, etc. The following + comparison operators are available: `EQ | NE | LE | LT | GE | GT | + NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | + BETWEEN` For complete descriptions of all comparison operators, see + `Condition`_. + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the ScanFilter map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. - + `EQ` : Equal. AttributeValueList can contain only one AttributeValue - of type String, Number, or Binary (not a set). If an item contains - an AttributeValue of a different type than the one specified in the - request, the value does not match. For example, `{"S":"6"}` does - not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal - `{"NS":["6", "2", "1"]}`. - + `NE` : Not equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). 
If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - equal `{"NS":["6", "2", "1"]}`. - + `LE` : Less than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `LT` : Less than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `GE` : Greater than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `GT` : Greater than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `NOT_NULL` : The attribute exists. - + `NULL` : The attribute does not exist. - + `CONTAINS` : checks for a subsequence, or value in a set. 
- AttributeValueList can contain only one AttributeValue of type - String, Number, or Binary (not a set). If the target attribute of - the comparison is a String, then the operation checks for a - substring match. If the target attribute of the comparison is - Binary, then the operation looks for a subsequence of the target - that matches the input. If the target attribute of the comparison - is a set ("SS", "NS", or "BS"), then the operation checks for a - member of the set (not as a substring). - + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a - value in a set. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If - the target attribute of the comparison is a String, then the - operation checks for the absence of a substring match. If the - target attribute of the comparison is Binary, then the operation - checks for the absence of a subsequence of the target that matches - the input. If the target attribute of the comparison is a set - ("SS", "NS", or "BS"), then the operation checks for the absence of - a member of the set (not as a substring). - + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain - only one AttributeValue of type String or Binary (not a Number or a - set). The target attribute of the comparison must be a String or - Binary (not a Number or a set). - + `IN` : checks for exact matches. AttributeValueList can contain more - than one AttributeValue of type String, Number, or Binary (not a - set). The target attribute of the comparison must be of the same - type and exact value to match. A String never matches a String set. - + `BETWEEN` : Greater than or equal to the first value, and less than - or equal to the second value. AttributeValueList must contain two - AttributeValue elements of the same type, either String, Number, or - Binary (not a set). 
A target attribute matches if the target value - is greater than, or equal to, the first element and less than, or - equal to, the second element. If an item contains an AttributeValue - of a different type than the one specified in the request, the - value does not match. For example, `{"S":"6"}` does not compare to - `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", - "2", "1"]}` + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. :type exclusive_start_key: map :param exclusive_start_key: The primary key of the first item that this @@ -1175,9 +1652,11 @@ corresponding value of LastEvaluatedKey . :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. 
:type total_segments: integer :param total_segments: For a parallel Scan request, TotalSegments @@ -1219,6 +1698,8 @@ params['Select'] = select if scan_filter is not None: params['ScanFilter'] = scan_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if exclusive_start_key is not None: params['ExclusiveStartKey'] = exclusive_start_key if return_consumed_capacity is not None: @@ -1231,8 +1712,8 @@ body=json.dumps(params)) def update_item(self, table_name, key, attribute_updates=None, - expected=None, return_values=None, - return_consumed_capacity=None, + expected=None, conditional_operator=None, + return_values=None, return_consumed_capacity=None, return_item_collection_metrics=None): """ Edits an existing item's attributes, or inserts a new item if @@ -1250,8 +1731,12 @@ :param table_name: The name of the table containing the item to update. :type key: map - :param key: The primary key that defines the item. Each element + :param key: The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type attribute_updates: map :param attribute_updates: The names of attributes to be modified, the @@ -1294,16 +1779,16 @@ If Value is a negative number, then it is subtracted from the existing attribute. If you use `ADD` to increment or decrement a number value for an item that doesn't exist before the update, - Amazon DynamoDB uses 0 as the initial value. In addition, if you - use `ADD` to update an existing item, and intend to increment or - decrement an attribute value which does not yet exist, Amazon - DynamoDB uses `0` as the initial value. 
For example, suppose that - the item you want to update does not yet have an attribute named - itemcount , but you decide to `ADD` the number `3` to this - attribute anyway, even though it currently does not exist. Amazon - DynamoDB will create the itemcount attribute, set its initial value - to `0`, and finally add `3` to it. The result will be a new - itemcount attribute in the item, with a value of `3`. + DynamoDB uses 0 as the initial value. In addition, if you use `ADD` + to update an existing item, and intend to increment or decrement an + attribute value which does not yet exist, DynamoDB uses `0` as the + initial value. For example, suppose that the item you want to + update does not yet have an attribute named itemcount , but you + decide to `ADD` the number `3` to this attribute anyway, even + though it currently does not exist. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute in the + item, with a value of `3`. + If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the @@ -1319,13 +1804,13 @@ number or is a set. Do not use `ADD` for any other data types. **If no item with the specified Key is found:** - + `PUT` - Amazon DynamoDB creates a new item with the specified primary - key, and then adds the attribute. + + `PUT` - DynamoDB creates a new item with the specified primary key, + and then adds the attribute. + `DELETE` - Nothing happens; there is no attribute to delete. - + `ADD` - Amazon DynamoDB creates an item with the supplied primary key - and number (or set of numbers) for the attribute value. The only - data types allowed are number and number set; no other data types - can be specified. + + `ADD` - DynamoDB creates an item with the supplied primary key and + number (or set of numbers) for the attribute value. 
The only data + types allowed are number and number set; no other data types can be + specified. @@ -1334,48 +1819,175 @@ the table's attribute definition. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the UpdateItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . (You cannot expect - an attribute to have a value, while also expecting it not to - exist.) 
+ :param expected: + A map of attribute/condition pairs. This is the conditional block for + the UpdateItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. 
The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
  • + + `NE` : Not equal. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. + + `NULL` : The attribute does not exist. + + `CONTAINS` : checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue of type + String, Number, or Binary (not a set). If the target attribute of + the comparison is a String, then the operation checks for a + substring match. If the target attribute of the comparison is + Binary, then the operation looks for a subsequence of the target + that matches the input. If the target attribute of the comparison + is a set ("SS", "NS", or "BS"), then the operation checks for a + member of the set (not as a substring). + + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If + the target attribute of the comparison is a String, then the + operation checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operation + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set + ("SS", "NS", or "BS"), then the operation checks for the absence of + a member of the set (not as a substring). + + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  • + + `IN` : checks for exact matches. AttributeValueList can contain more + than one AttributeValue of type String, Number, or Binary (not a + set). The target attribute of the comparison must be of the same + type and exact value to match. A String never matches a String set. + + `BETWEEN` : Greater than or equal to the first value, and less than + or equal to the second value. AttributeValueList must contain two + AttributeValue elements of the same type, either String, Number, or + Binary (not a set). A target attribute matches if the target value + is greater than, or equal to, the first element and less than, or + equal to, the second element. If an item contains an AttributeValue + of a different type than the one specified in the request, the + value does not match. For example, `{"S":"6"}` does not compare to + `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", + "2", "1"]}` + + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + + For backward compatibility with previous DynamoDB releases, the + following parameters can be used instead of AttributeValueList and + ComparisonOperator : + + + + Value - A value for DynamoDB to compare with an attribute. + + Exists - Causes DynamoDB to evaluate the value before attempting the + conditional operation: + + + If Exists is `True`, DynamoDB will check to see if that attribute + value already exists in the table. If it is found, then the + condition evaluates to true; otherwise the condition evaluate to + false. + + If Exists is `False`, DynamoDB assumes that the attribute value does + not exist in the table. If in fact the value does not exist, then + the assumption is valid and the condition evaluates to true. If the + value is found, despite the assumption that it does not exist, the + condition evaluates to false. 
+ + + + Even though DynamoDB continues to accept the Value and Exists + parameters, they are now deprecated. We recommend that you use + AttributeValueList and ComparisonOperator instead, since they allow + you to construct a much wider range of conditions. + + The Value and Exists parameters are incompatible with + AttributeValueList and ComparisonOperator . If you attempt to use + both sets of parameters at once, DynamoDB will throw a + ValidationException . + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the Expected map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + If you omit ConditionalOperator , then `AND` is the default. - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + The operation will succeed only if the entire map evaluates to true. :type return_values: string :param return_values: @@ -1396,15 +2008,17 @@ returned. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. 
If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. """ params = {'TableName': table_name, 'Key': key, } @@ -1412,6 +2026,8 @@ params['AttributeUpdates'] = attribute_updates if expected is not None: params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if return_values is not None: params['ReturnValues'] = return_values if return_consumed_capacity is not None: @@ -1427,7 +2043,7 @@ Updates the provisioned throughput for the given table. Setting the throughput for a table helps you manage performance and is part of the provisioned throughput feature - of Amazon DynamoDB. + of DynamoDB. The provisioned throughput values can be upgraded or downgraded based on the maximums and minimums listed in the @@ -1442,22 +2058,23 @@ table returns to the `ACTIVE` state after the UpdateTable operation. - You cannot add, modify or delete local secondary indexes using - UpdateTable . Local secondary indexes can only be defined at - table creation time. + You cannot add, modify or delete indexes using UpdateTable . + Indexes can only be defined at table creation time. :type table_name: string :param table_name: The name of the table to be updated. :type provisioned_throughput: dict - :param provisioned_throughput: The provisioned throughput settings for - the specified table. The settings can be modified using the - UpdateTable operation. + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. For current minimum and maximum provisioned throughput values, see `Limits`_ in the Amazon DynamoDB Developer Guide. 
:type global_secondary_index_updates: list - :param global_secondary_index_updates: + :param global_secondary_index_updates: An array of one or more global + secondary indexes on the table, together with provisioned + throughput settings for each index. """ params = {'TableName': table_name, } diff -Nru python-boto-2.20.1/boto/dynamodb2/results.py python-boto-2.29.1/boto/dynamodb2/results.py --- python-boto-2.20.1/boto/dynamodb2/results.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/results.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ ... print res['username'] """ - def __init__(self): + def __init__(self, max_page_size=None): super(ResultSet, self).__init__() self.the_callable = None self.call_args = [] @@ -29,6 +29,9 @@ self._offset = -1 self._results_left = True self._last_key_seen = None + self._fetches = 0 + self._max_page_size = max_page_size + self._limit = None @property def first_key(self): @@ -65,6 +68,12 @@ self.fetch_more() if self._offset < len(self._results): + if self._limit is not None: + self._limit -= 1 + + if self._limit < 0: + raise StopIteration() + return self._results[self._offset] else: raise StopIteration() @@ -92,6 +101,14 @@ 'You must supply an object or function to be called.' ) + # We pop the ``limit``, if present, to track how many we should return + # to the user. This isn't the same as the ``limit`` that the low-level + # DDB api calls use (which limit page size, not the overall result set). + self._limit = kwargs.pop('limit', None) + + if self._limit < 0: + self._limit = None + self.the_callable = the_callable self.call_args = args self.call_kwargs = kwargs @@ -111,19 +128,34 @@ if self._last_key_seen is not None: kwargs[self.first_key] = self._last_key_seen + # If the page size is greater than limit set them + # to the same value + if self._limit and self._max_page_size > self._limit: + self._max_page_size = self._limit + + # Put in the max page size. 
+ if self._max_page_size is not None: + kwargs['limit'] = self._max_page_size + elif self._limit is not None: + # If max_page_size is not set and limit is available + # use it as the page size + kwargs['limit'] = self._limit + results = self.the_callable(*args, **kwargs) + self._fetches += 1 new_results = results.get('results', []) self._last_key_seen = results.get('last_key', None) if len(new_results): self._results.extend(results['results']) - # Decrease the limit, if it's present. - if self.call_kwargs.get('limit'): - self.call_kwargs['limit'] -= len(results['results']) - # and if limit hits zero, we don't have any more - # results to look for - if 0 == self.call_kwargs['limit']: + # Check the limit, if it's present. + if self._limit is not None and self._limit >= 0: + limit = self._limit + limit -= len(results['results']) + # If we've exceeded the limit, we don't have any more + # results to look for. + if limit <= 0: self._results_left = False if self._last_key_seen is None: diff -Nru python-boto-2.20.1/boto/dynamodb2/table.py python-boto-2.29.1/boto/dynamodb2/table.py --- python-boto-2.20.1/boto/dynamodb2/table.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/dynamodb2/table.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,11 +1,15 @@ import boto from boto.dynamodb2 import exceptions from boto.dynamodb2.fields import (HashKey, RangeKey, - AllIndex, KeysOnlyIndex, IncludeIndex) + AllIndex, KeysOnlyIndex, IncludeIndex, + GlobalAllIndex, GlobalKeysOnlyIndex, + GlobalIncludeIndex) from boto.dynamodb2.items import Item from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet -from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS +from boto.dynamodb2.types import (Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS, + STRING) +from boto.exception import JSONResponseError class Table(object): @@ -21,7 +25,7 @@ max_batch_get = 100 def __init__(self, table_name, schema=None, 
throughput=None, indexes=None, - connection=None): + global_indexes=None, connection=None): """ Sets up a new in-memory ``Table``. @@ -48,6 +52,10 @@ Optionally accepts a ``indexes`` parameter, which should be a list of ``BaseIndexField`` subclasses representing the desired indexes. + Optionally accepts a ``global_indexes`` parameter, which should be a + list of ``GlobalBaseIndexField`` subclasses representing the desired + indexes. + Optionally accepts a ``connection`` parameter, which should be a ``DynamoDBConnection`` instance (or subclass). This is primarily useful for specifying alternate connection parameters. @@ -67,13 +75,22 @@ ... 'write': 10, ... }, indexes=[ ... KeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username') ... RangeKey('date_joined') ... ]), - ... ], - ... connection=dynamodb2.connect_to_region('us-west-2', - ... aws_access_key_id='key', - ... aws_secret_access_key='key', - ... )) + ... ], global_indexes=[ + ... GlobalAllIndex('UsersByZipcode', parts=[ + ... HashKey('zipcode'), + ... RangeKey('username'), + ... ], + ... throughput={ + ... 'read':10, + ... 'write":10, + ... }), + ... ], connection=dynamodb2.connect_to_region('us-west-2', + ... aws_access_key_id='key', + ... aws_secret_access_key='key', + ... )) """ self.table_name = table_name @@ -84,6 +101,7 @@ } self.schema = schema self.indexes = indexes + self.global_indexes = global_indexes if self.connection is None: self.connection = DynamoDBConnection() @@ -95,7 +113,7 @@ @classmethod def create(cls, table_name, schema, throughput=None, indexes=None, - connection=None): + global_indexes=None, connection=None): """ Creates a new table in DynamoDB & returns an in-memory ``Table`` object. @@ -127,6 +145,10 @@ Optionally accepts a ``indexes`` parameter, which should be a list of ``BaseIndexField`` subclasses representing the desired indexes. 
+ Optionally accepts a ``global_indexes`` parameter, which should be a + list of ``GlobalBaseIndexField`` subclasses representing the desired + indexes. + Optionally accepts a ``connection`` parameter, which should be a ``DynamoDBConnection`` instance (or subclass). This is primarily useful for specifying alternate connection parameters. @@ -142,7 +164,15 @@ ... }, indexes=[ ... KeysOnlyIndex('MostRecentlyJoined', parts=[ ... RangeKey('date_joined') - ... ]), + ... ]), global_indexes=[ + ... GlobalAllIndex('UsersByZipcode', parts=[ + ... HashKey('zipcode'), + ... RangeKey('username'), + ... ], + ... throughput={ + ... 'read':10, + ... 'write':10, + ... }), ... ]) """ @@ -155,13 +185,18 @@ if indexes is not None: table.indexes = indexes + if global_indexes is not None: + table.global_indexes = global_indexes + # Prep the schema. raw_schema = [] attr_defs = [] + seen_attrs = set() for field in table.schema: raw_schema.append(field.schema()) # Build the attributes off what we know. + seen_attrs.add(field.name) attr_defs.append(field.definition()) raw_throughput = { @@ -170,23 +205,24 @@ } kwargs = {} - if table.indexes: - # Prep the LSIs. - raw_lsi = [] - - for index_field in table.indexes: - raw_lsi.append(index_field.schema()) - # Again, build the attributes off what we know. - # HOWEVER, only add attributes *NOT* already seen. 
- attr_define = index_field.definition() - - for part in attr_define: - attr_names = [attr['AttributeName'] for attr in attr_defs] - - if not part['AttributeName'] in attr_names: - attr_defs.append(part) + kwarg_map = { + 'indexes': 'local_secondary_indexes', + 'global_indexes': 'global_secondary_indexes', + } + for index_attr in ('indexes', 'global_indexes'): + table_indexes = getattr(table, index_attr) + if table_indexes: + raw_indexes = [] + for index_field in table_indexes: + raw_indexes.append(index_field.schema()) + # Make sure all attributes specified in the indexes are + # added to the definition + for field in index_field.parts: + if field.name not in seen_attrs: + seen_attrs.add(field.name) + attr_defs.append(field.definition()) - kwargs['local_secondary_indexes'] = raw_lsi + kwargs[kwarg_map[index_attr]] = raw_indexes table.connection.create_table( table_name=table.table_name, @@ -197,18 +233,29 @@ ) return table - def _introspect_schema(self, raw_schema): + def _introspect_schema(self, raw_schema, raw_attributes=None): """ Given a raw schema structure back from a DynamoDB response, parse out & build the high-level Python objects that represent them. """ schema = [] + sane_attributes = {} + + if raw_attributes: + for field in raw_attributes: + sane_attributes[field['AttributeName']] = field['AttributeType'] for field in raw_schema: + data_type = sane_attributes.get(field['AttributeName'], STRING) + if field['KeyType'] == 'HASH': - schema.append(HashKey(field['AttributeName'])) + schema.append( + HashKey(field['AttributeName'], data_type=data_type) + ) elif field['KeyType'] == 'RANGE': - schema.append(RangeKey(field['AttributeName'])) + schema.append( + RangeKey(field['AttributeName'], data_type=data_type) + ) else: raise exceptions.UnknownSchemaFieldError( "%s was seen, but is unknown. 
Please report this at " @@ -245,7 +292,7 @@ ) name = field['IndexName'] - kwargs['parts'] = self._introspect_schema(field['KeySchema']) + kwargs['parts'] = self._introspect_schema(field['KeySchema'], None) indexes.append(index_klass(name, **kwargs)) return indexes @@ -284,7 +331,8 @@ if not self.schema: # Since we have the data, build the schema. raw_schema = result['Table'].get('KeySchema', []) - self.schema = self._introspect_schema(raw_schema) + raw_attributes = result['Table'].get('AttributeDefinitions', []) + self.schema = self._introspect_schema(raw_schema, raw_attributes) if not self.indexes: # Build the index information as well. @@ -294,7 +342,7 @@ # This is leaky. return result - def update(self, throughput): + def update(self, throughput, global_indexes=None): """ Updates table attributes in DynamoDB. @@ -316,12 +364,46 @@ ... }) True + # To also update the global index(es) throughput. + >>> users.update(throughput={ + ... 'read': 20, + ... 'write': 10, + ... }, + ... global_secondary_indexes={ + ... 'TheIndexNameHere': { + ... 'read': 15, + ... 'write': 5, + ... } + ... 
}) + True + """ self.throughput = throughput - self.connection.update_table(self.table_name, { + data = { 'ReadCapacityUnits': int(self.throughput['read']), 'WriteCapacityUnits': int(self.throughput['write']), - }) + } + gsi_data = None + + if global_indexes: + gsi_data = [] + + for gsi_name, gsi_throughput in global_indexes.items(): + gsi_data.append({ + "Update": { + "IndexName": gsi_name, + "ProvisionedThroughput": { + "ReadCapacityUnits": int(gsi_throughput['read']), + "WriteCapacityUnits": int(gsi_throughput['write']), + }, + }, + }) + + self.connection.update_table( + self.table_name, + provisioned_throughput=data, + global_secondary_index_updates=gsi_data + ) return True def delete(self): @@ -368,7 +450,7 @@ return raw_key - def get_item(self, consistent=False, **kwargs): + def get_item(self, consistent=False, attributes=None, **kwargs): """ Fetches an item (record) from a table in DynamoDB. @@ -380,6 +462,10 @@ a consistent (but more expensive) read from DynamoDB. (Default: ``False``) + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldname to fetch. (Default: ``None``, which means all fields + should be fetched) + Returns an ``Item`` instance containing all the data for that record. Example:: @@ -412,12 +498,54 @@ item_data = self.connection.get_item( self.table_name, raw_key, + attributes_to_get=attributes, consistent_read=consistent ) + if 'Item' not in item_data: + raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs) item = Item(self) item.load(item_data) return item + def has_item(self, **kwargs): + """ + Return whether an item (record) exists within a table in DynamoDB. + + To specify the key of the item you'd like to get, you can specify the + key attributes as kwargs. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will perform + a consistent (but more expensive) read from DynamoDB. 
+ (Default: ``False``) + + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldnames to fetch. (Default: ``None``, which means all fields + should be fetched) + + Returns ``True`` if an ``Item`` is present, ``False`` if not. + + Example:: + + # Simple, just hash-key schema. + >>> users.has_item(username='johndoe') + True + + # Complex schema, item not present. + >>> users.has_item( + ... username='johndoe', + ... date_joined='2014-01-07' + ... ) + False + + """ + try: + self.get_item(**kwargs) + except (JSONResponseError, exceptions.ItemNotFound): + return False + + return True + def lookup(self, *args, **kwargs): """ Look up an entry in DynamoDB. This is mostly backwards compatible @@ -456,7 +584,6 @@ data[self.schema[x].name] = arg return Item(self, data=data) - def put_item(self, data, overwrite=False): """ Saves an entire item to DynamoDB. @@ -630,6 +757,9 @@ An internal method for taking query/scan-style ``**kwargs`` & turning them into the raw structure DynamoDB expects for filtering. """ + if filter_kwargs is None: + return + filters = {} for field_and_op, value in filter_kwargs.items(): @@ -687,19 +817,36 @@ return filters def query(self, limit=None, index=None, reverse=False, consistent=False, - attributes=None, **filter_kwargs): + attributes=None, max_page_size=None, **filter_kwargs): + """ + **WARNING:** This method is provided **strictly** for + backward-compatibility. It returns results in an incorrect order. + + If you are writing new code, please use ``Table.query_2``. + """ + reverse = not reverse + return self.query_2(limit=limit, index=index, reverse=reverse, + consistent=consistent, attributes=attributes, + max_page_size=max_page_size, **filter_kwargs) + + def query_2(self, limit=None, index=None, reverse=False, + consistent=False, attributes=None, max_page_size=None, + query_filter=None, conditional_operator=None, + **filter_kwargs): """ Queries for a set of matching items in a DynamoDB table. 
Queries can be performed against a hash key, a hash+range key or - against any data stored in your local secondary indexes. + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. **Note** - You can not query against arbitrary fields within the data - stored in DynamoDB. + stored in DynamoDB unless you specify ``query_filter`` values. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern - ``__=``. + ``__=``. Query filters + are specified in the same way. Optionally accepts a ``limit`` parameter, which should be an integer count of the total number of items to return. (Default: ``None`` - @@ -710,7 +857,7 @@ (Default: ``None``) Optionally accepts a ``reverse`` parameter, which will present the - results in reverse order. (Default: ``None`` - normal order) + results in reverse order. (Default: ``False`` - normal order) Optionally accepts a ``consistent`` parameter, which should be a boolean. If you provide ``True``, it will force a consistent read of @@ -722,6 +869,21 @@ from DynamoDB. This uses the ``AttributesToGet`` and set's ``Select`` to ``SPECIFIC_ATTRIBUTES`` API. + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & prevent + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. 
+ + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + Returns a ``ResultSet``, which transparently handles the pagination of results you get back. @@ -760,19 +922,38 @@ 'John' 'Fred' + # Filter by non-indexed field(s) + >>> results = users.query( + ... last_name__eq='Doe', + ... reverse=True, + ... query_filter={ + ... 'first_name__beginswith': 'A' + ... } + ... ) + >>> for res in results: + ... print res['first_name'] + ' ' + res['last_name'] + 'Alice Doe' + """ if self.schema: - if len(self.schema) == 1 and len(filter_kwargs) <= 1: - raise exceptions.QueryError( - "You must specify more than one key to filter on." - ) + if len(self.schema) == 1: + if len(filter_kwargs) <= 1: + if not self.global_indexes or not len(self.global_indexes): + # If the schema only has one field, there's <= 1 filter + # param & no Global Secondary Indexes, this is user + # error. Bail early. + raise exceptions.QueryError( + "You must specify more than one key to filter on." + ) if attributes is not None: select = 'SPECIFIC_ATTRIBUTES' else: select = None - results = ResultSet() + results = ResultSet( + max_page_size=max_page_size + ) kwargs = filter_kwargs.copy() kwargs.update({ 'limit': limit, @@ -780,21 +961,26 @@ 'reverse': reverse, 'consistent': consistent, 'select': select, - 'attributes_to_get': attributes + 'attributes_to_get': attributes, + 'query_filter': query_filter, + 'conditional_operator': conditional_operator, }) results.to_call(self._query, **kwargs) return results - def query_count(self, index=None, consistent=False, **filter_kwargs): + def query_count(self, index=None, consistent=False, conditional_operator=None, + query_filter=None, **filter_kwargs): """ Queries the exact count of matching items in a DynamoDB table. 
Queries can be performed against a hash key, a hash+range key or - against any data stored in your local secondary indexes. + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern - ``__=``. + ``__=``. Query filters + are specified in the same way. Optionally accepts an ``index`` parameter, which should be a string of name of the local secondary index you want to query against. @@ -805,6 +991,15 @@ the data (more expensive). (Default: ``False`` - use eventually consistent reads) + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. + + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + Returns an integer which represents the exact amount of matched items. @@ -829,18 +1024,25 @@ using=QUERY_OPERATORS ) + built_query_filter = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + raw_results = self.connection.query( self.table_name, index_name=index, consistent_read=consistent, select='COUNT', key_conditions=key_conditions, + query_filter=built_query_filter, + conditional_operator=conditional_operator, ) return int(raw_results.get('Count', 0)) def _query(self, limit=None, index=None, reverse=False, consistent=False, exclusive_start_key=None, select=None, attributes_to_get=None, - **filter_kwargs): + query_filter=None, conditional_operator=None, **filter_kwargs): """ The internal method that performs the actual queries. Used extensively by ``ResultSet`` to perform each (paginated) request. 
@@ -848,12 +1050,15 @@ kwargs = { 'limit': limit, 'index_name': index, - 'scan_index_forward': reverse, 'consistent_read': consistent, 'select': select, - 'attributes_to_get': attributes_to_get + 'attributes_to_get': attributes_to_get, + 'conditional_operator': conditional_operator, } + if reverse: + kwargs['scan_index_forward'] = False + if exclusive_start_key: kwargs['exclusive_start_key'] = {} @@ -867,6 +1072,11 @@ using=QUERY_OPERATORS ) + kwargs['query_filter'] = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + raw_results = self.connection.query( self.table_name, **kwargs @@ -893,13 +1103,14 @@ } def scan(self, limit=None, segment=None, total_segments=None, + max_page_size=None, attributes=None, conditional_operator=None, **filter_kwargs): """ Scans across all items within a DynamoDB table. Scans can be performed against a hash key or a hash+range key. You can additionally filter the results after the table has been read but - before the response is returned. + before the response is returned by using query filters. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern @@ -909,6 +1120,26 @@ count of the total number of items to return. (Default: ``None`` - all results) + Optionally accepts a ``segment`` parameter, which should be an integer + of the segment to retrieve on. Please see the documentation about + Parallel Scans (Default: ``None`` - no segments) + + Optionally accepts a ``total_segments`` parameter, which should be an + integer count of number of segments to divide the table into. + Please see the documentation about Parallel Scans (Default: ``None`` - + no segments) + + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & prevent + the scan from drowning out other queries. 
(Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes only these will be fetched + from DynamoDB. This uses the ``AttributesToGet`` and set's + ``Select`` to ``SPECIFIC_ATTRIBUTES`` API. + Returns a ``ResultSet``, which transparently handles the pagination of results you get back. @@ -935,18 +1166,23 @@ 'Alice' """ - results = ResultSet() + results = ResultSet( + max_page_size=max_page_size + ) kwargs = filter_kwargs.copy() kwargs.update({ 'limit': limit, 'segment': segment, 'total_segments': total_segments, + 'attributes': attributes, + 'conditional_operator': conditional_operator, }) results.to_call(self._scan, **kwargs) return results def _scan(self, limit=None, exclusive_start_key=None, segment=None, - total_segments=None, **filter_kwargs): + total_segments=None, attributes=None, conditional_operator=None, + **filter_kwargs): """ The internal method that performs the actual scan. Used extensively by ``ResultSet`` to perform each (paginated) request. @@ -955,6 +1191,8 @@ 'limit': limit, 'segment': segment, 'total_segments': total_segments, + 'attributes_to_get': attributes, + 'conditional_operator': conditional_operator, } if exclusive_start_key: @@ -1032,7 +1270,7 @@ # We pass the keys to the constructor instead, so it can maintain it's # own internal state as to what keys have been processed. 
results = BatchGetResultSet(keys=keys, max_batch_get=self.max_batch_get) - results.to_call(self._batch_get, consistent=False) + results.to_call(self._batch_get, consistent=consistent) return results def _batch_get(self, keys, consistent=False): diff -Nru python-boto-2.20.1/boto/ec2/address.py python-boto-2.29.1/boto/ec2/address.py --- python-boto-2.20.1/boto/ec2/address.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/address.py 2014-05-30 20:49:34.000000000 +0000 @@ -37,7 +37,7 @@ """ def __init__(self, connection=None, public_ip=None, instance_id=None): - EC2Object.__init__(self, connection) + super(Address, self).__init__(connection) self.connection = connection self.public_ip = public_ip self.instance_id = instance_id @@ -89,14 +89,23 @@ delete = release - def associate(self, instance_id, dry_run=False): + def associate(self, instance_id, allow_reassociation=False, dry_run=False): """ Associate this Elastic IP address with a currently running instance. :see: :meth:`boto.ec2.connection.EC2Connection.associate_address` """ + if self.allocation_id: + return self.connection.associate_address( + instance_id, + self.public_ip, + allocation_id=self.allocation_id, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) return self.connection.associate_address( instance_id, self.public_ip, + allow_reassociation=allow_reassociation, dry_run=dry_run ) diff -Nru python-boto-2.20.1/boto/ec2/autoscale/group.py python-boto-2.29.1/boto/ec2/autoscale/group.py --- python-boto-2.20.1/boto/ec2/autoscale/group.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/autoscale/group.py 2014-05-30 20:49:34.000000000 +0000 @@ -98,7 +98,8 @@ health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, desired_capacity=None, min_size=None, max_size=None, - tags=None, termination_policies=None, **kwargs): + tags=None, termination_policies=None, instance_id=None, + **kwargs): """ Creates a new 
AutoScalingGroup with the specified name. @@ -145,12 +146,12 @@ :param placement_group: Physical location of your cluster placement group created in Amazon EC2. - :type vpc_zone_identifier: str - :param vpc_zone_identifier: The subnet identifier of the Virtual - Private Cloud. - + :type vpc_zone_identifier: str or list + :param vpc_zone_identifier: A comma-separated string or python list of + the subnet identifiers of the Virtual Private Cloud. + :type tags: list - :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s + :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s :type termination_policies: list :param termination_policies: A list of termination policies. Valid values @@ -158,6 +159,10 @@ "ClosestToNextInstanceHour", "Default". If no value is specified, the "Default" value is used. + :type instance_id: str + :param instance_id: The ID of the Amazon EC2 instance you want to use + to create the Auto Scaling group. + :rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup` :return: An autoscale group. 
""" @@ -183,11 +188,14 @@ self.health_check_type = health_check_type self.placement_group = placement_group self.autoscaling_group_arn = None + if type(vpc_zone_identifier) is list: + vpc_zone_identifier = ','.join(vpc_zone_identifier) self.vpc_zone_identifier = vpc_zone_identifier self.instances = None self.tags = tags or None termination_policies = termination_policies or [] self.termination_policies = ListElement(termination_policies) + self.instance_id = instance_id # backwards compatible access to 'cooldown' param def _get_cooldown(self): @@ -251,6 +259,8 @@ self.health_check_type = value elif name == 'VPCZoneIdentifier': self.vpc_zone_identifier = value + elif name == 'InstanceId': + self.instance_id = value else: setattr(self, name, value) @@ -304,7 +314,7 @@ 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', - 'autoscaling:TEST_NOTIFICATION' + 'autoscaling:TEST_NOTIFICATION' """ return self.connection.put_notification_configuration(self, topic, diff -Nru python-boto-2.20.1/boto/ec2/autoscale/__init__.py python-boto-2.29.1/boto/ec2/autoscale/__init__.py --- python-boto-2.20.1/boto/ec2/autoscale/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/autoscale/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,7 +31,7 @@ import boto from boto.connection import AWSQueryConnection -from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions from boto.ec2.autoscale.request import Request from boto.ec2.autoscale.launchconfig import LaunchConfiguration from boto.ec2.autoscale.group import AutoScalingGroup @@ -44,18 +44,9 @@ from boto.ec2.autoscale.instance import Instance from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction from boto.ec2.autoscale.tag import Tag +from boto.ec2.autoscale.limits import AccountLimits -RegionData = { - 'us-east-1': 'autoscaling.us-east-1.amazonaws.com', - 'us-gov-west-1': 
'autoscaling.us-gov-west-1.amazonaws.com', - 'us-west-1': 'autoscaling.us-west-1.amazonaws.com', - 'us-west-2': 'autoscaling.us-west-2.amazonaws.com', - 'sa-east-1': 'autoscaling.sa-east-1.amazonaws.com', - 'eu-west-1': 'autoscaling.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'autoscaling.ap-southeast-2.amazonaws.com', -} +RegionData = load_regions().get('autoscaling', {}) def regions(): @@ -65,13 +56,7 @@ :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=AutoScaleConnection) - regions.append(region) - return regions + return get_regions('autoscaling', connection_cls=AutoScaleConnection) def connect_to_region(region_name, **kw_params): @@ -102,26 +87,31 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None, + use_block_device_types=False): """ Init method to create a new connection to the AutoScaling service. B{Note:} The host argument is overridden by the host specified in the boto configuration file. 
+ + """ if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, AutoScaleConnection) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + self.use_block_device_types = use_block_device_types + super(AutoScaleConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path=path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -163,7 +153,7 @@ # get availability zone information (required param) zones = as_group.availability_zones self.build_list_params(params, zones, 'AvailabilityZones') - if as_group.desired_capacity: + if as_group.desired_capacity is not None: params['DesiredCapacity'] = as_group.desired_capacity if as_group.vpc_zone_identifier: params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier @@ -175,6 +165,8 @@ params['DefaultCooldown'] = as_group.default_cooldown if as_group.placement_group: params['PlacementGroup'] = as_group.placement_group + if as_group.instance_id: + params['InstanceId'] = as_group.instance_id if as_group.termination_policies: self.build_list_params(params, as_group.termination_policies, 'TerminationPolicies') @@ -189,6 +181,16 @@ tag.build_params(params, i + 1) return self.get_object(op, params, Request) + def attach_instances(self, name, instance_ids): + """ + Attach instances to an autoscaling group. + """ + params = { + 'AutoScalingGroupName': name, + } + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('AttachInstances', params) + def create_auto_scaling_group(self, as_group): """ Create auto scaling group. 
@@ -245,9 +247,25 @@ params['AssociatePublicIpAddress'] = 'true' elif launch_config.associate_public_ip_address is False: params['AssociatePublicIpAddress'] = 'false' + if launch_config.volume_type: + params['VolumeType'] = launch_config.volume_type + if launch_config.delete_on_termination: + params['DeleteOnTermination'] = 'true' + else: + params['DeleteOnTermination'] = 'false' + if launch_config.iops: + params['Iops'] = launch_config.iops return self.get_object('CreateLaunchConfiguration', params, Request, verb='POST') + def get_account_limits(self): + """ + Returns the limits for the Auto Scaling resources currently granted for + your AWS account. + """ + params = {} + return self.get_object('DescribeAccountLimits', params, AccountLimits) + def create_scaling_policy(self, scaling_policy): """ Creates a new Scaling Policy. diff -Nru python-boto-2.20.1/boto/ec2/autoscale/launchconfig.py python-boto-2.29.1/boto/ec2/autoscale/launchconfig.py --- python-boto-2.20.1/boto/ec2/autoscale/launchconfig.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/autoscale/launchconfig.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,14 +21,16 @@ # IN THE SOFTWARE. 
from datetime import datetime -from boto.resultset import ResultSet from boto.ec2.elb.listelement import ListElement +# Namespacing issue with deprecated local class +from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM +from boto.resultset import ResultSet import boto.utils import base64 -# this should use the corresponding object from boto.ec2 - +# this should use the corresponding object from boto.ec2 +# Currently in use by deprecated local BlockDeviceMapping class class Ebs(object): def __init__(self, connection=None, snapshot_id=None, volume_size=None): self.connection = connection @@ -65,12 +67,16 @@ # this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping +# Currently in use by deprecated code for backwards compatability +# Removing this class can also remove the Ebs class in this same file class BlockDeviceMapping(object): - def __init__(self, connection=None, device_name=None, virtual_name=None): + def __init__(self, connection=None, device_name=None, virtual_name=None, + ebs=None, no_device=None): self.connection = connection - self.device_name = None - self.virtual_name = None - self.ebs = None + self.device_name = device_name + self.virtual_name = virtual_name + self.ebs = ebs + self.no_device = no_device def __repr__(self): return 'BlockDeviceMapping(%s, %s)' % (self.device_name, @@ -86,6 +92,8 @@ self.device_name = value elif name == 'VirtualName': self.virtual_name = value + elif name == 'NoDevice': + self.no_device = bool(value) class LaunchConfiguration(object): @@ -95,7 +103,8 @@ ramdisk_id=None, block_device_mappings=None, instance_monitoring=False, spot_price=None, instance_profile_name=None, ebs_optimized=False, - associate_public_ip_address=None): + associate_public_ip_address=None, volume_type=None, + delete_on_termination=True, iops=None, use_block_device_types=False): """ A launch configuration. 
@@ -147,8 +156,9 @@ :param ebs_optimized: Specifies whether the instance is optimized for EBS I/O (true) or not (false). + :type associate_public_ip_address: bool - :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. + :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. """ self.connection = connection @@ -170,6 +180,13 @@ self.launch_configuration_arn = None self.ebs_optimized = ebs_optimized self.associate_public_ip_address = associate_public_ip_address + self.volume_type = volume_type + self.delete_on_termination = delete_on_termination + self.iops = iops + self.use_block_device_types = use_block_device_types + + if connection is not None: + self.use_block_device_types = connection.use_block_device_types def __repr__(self): return 'LaunchConfiguration:%s' % self.name @@ -178,8 +195,10 @@ if name == 'SecurityGroups': return self.security_groups elif name == 'BlockDeviceMappings': - self.block_device_mappings = ResultSet([('member', - BlockDeviceMapping)]) + if self.use_block_device_types: + self.block_device_mappings = BDM() + else: + self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)]) return self.block_device_mappings elif name == 'InstanceMonitoring': self.instance_monitoring = InstanceMonitoring(self) @@ -215,6 +234,17 @@ self.instance_profile_name = value elif name == 'EbsOptimized': self.ebs_optimized = True if value.lower() == 'true' else False + elif name == 'AssociatePublicIpAddress': + self.associate_public_ip_address = True if value.lower() == 'true' else False + elif name == 'VolumeType': + self.volume_type = value + elif name == 'DeleteOnTermination': + if value.lower() == 'true': + self.delete_on_termination = True + else: + self.delete_on_termination = False + elif name == 'Iops': + self.iops = 
int(value) else: setattr(self, name, value) diff -Nru python-boto-2.20.1/boto/ec2/autoscale/limits.py python-boto-2.29.1/boto/ec2/autoscale/limits.py --- python-boto-2.20.1/boto/ec2/autoscale/limits.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/autoscale/limits.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class AccountLimits(object): + def __init__(self, connection=None): + self.connection = connection + self.max_autoscaling_groups = None + self.max_launch_configurations = None + + def __repr__(self): + return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups, + self.max_launch_configurations) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'RequestId': + self.request_id = value + elif name == 'MaxNumberOfAutoScalingGroups': + self.max_autoscaling_groups = int(value) + elif name == 'MaxNumberOfLaunchConfigurations': + self.max_launch_configurations = int(value) + else: + setattr(self, name, value) + diff -Nru python-boto-2.20.1/boto/ec2/autoscale/policy.py python-boto-2.29.1/boto/ec2/autoscale/policy.py --- python-boto-2.20.1/boto/ec2/autoscale/policy.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/autoscale/policy.py 2014-05-30 20:49:34.000000000 +0000 @@ -47,16 +47,17 @@ class AdjustmentType(object): def __init__(self, connection=None): self.connection = connection - self.adjustment_types = ListElement([]) + self.adjustment_type = None def __repr__(self): - return 'AdjustmentType:%s' % self.adjustment_types + return 'AdjustmentType:%s' % self.adjustment_type def startElement(self, name, attrs, connection): - if name == 'AdjustmentType': - return self.adjustment_types + return def endElement(self, name, value, connection): + if name == 'AdjustmentType': + self.adjustment_type = value return diff -Nru python-boto-2.20.1/boto/ec2/blockdevicemapping.py python-boto-2.29.1/boto/ec2/blockdevicemapping.py --- python-boto-2.20.1/boto/ec2/blockdevicemapping.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/blockdevicemapping.py 2014-05-30 20:49:34.000000000 +0000 @@ -55,25 +55,26 @@ pass def endElement(self, name, value, connection): + lname = name.lower() if name == 'volumeId': self.volume_id = value - elif name == 'virtualName': + elif 
lname == 'virtualname': self.ephemeral_name = value - elif name == 'NoDevice': + elif lname == 'nodevice': self.no_device = (value == 'true') - elif name == 'snapshotId': + elif lname == 'snapshotid': self.snapshot_id = value - elif name == 'volumeSize': + elif lname == 'volumesize': self.size = int(value) - elif name == 'status': + elif lname == 'status': self.status = value - elif name == 'attachTime': + elif lname == 'attachtime': self.attach_time = value - elif name == 'deleteOnTermination': + elif lname == 'deleteontermination': self.delete_on_termination = (value == 'true') - elif name == 'volumeType': + elif lname == 'volumetype': self.volume_type = value - elif name == 'iops': + elif lname == 'iops': self.iops = int(value) else: setattr(self, name, value) @@ -105,14 +106,16 @@ self.current_value = None def startElement(self, name, attrs, connection): - if name == 'ebs' or name == 'virtualName': + lname = name.lower() + if lname in ['ebs', 'virtualname']: self.current_value = BlockDeviceType(self) return self.current_value def endElement(self, name, value, connection): - if name == 'device' or name == 'deviceName': + lname = name.lower() + if lname in ['device', 'devicename']: self.current_name = value - elif name == 'item': + elif lname in ['item', 'member']: self[self.current_name] = self.current_value def ec2_build_list_params(self, params, prefix=''): diff -Nru python-boto-2.20.1/boto/ec2/bundleinstance.py python-boto-2.29.1/boto/ec2/bundleinstance.py --- python-boto-2.20.1/boto/ec2/bundleinstance.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/bundleinstance.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,21 +14,21 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ -Represents an EC2 Bundle Task +Represents an EC2 Bundle Task """ from boto.ec2.ec2object import EC2Object class BundleInstanceTask(EC2Object): - + def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(BundleInstanceTask, self).__init__(connection) self.id = None self.instance_id = None self.progress = None @@ -38,7 +38,7 @@ self.prefix = None self.upload_policy = None self.upload_policy_signature = None - self.update_time = None + self.update_time = None self.code = None self.message = None diff -Nru python-boto-2.20.1/boto/ec2/cloudwatch/__init__.py python-boto-2.29.1/boto/ec2/cloudwatch/__init__.py --- python-boto-2.20.1/boto/ec2/cloudwatch/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/cloudwatch/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -28,20 +28,10 @@ from boto.ec2.cloudwatch.metric import Metric from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem from boto.ec2.cloudwatch.datapoint import Datapoint -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions import boto -RegionData = { - 'us-east-1': 'monitoring.us-east-1.amazonaws.com', - 'us-gov-west-1': 'monitoring.us-gov-west-1.amazonaws.com', - 'us-west-1': 'monitoring.us-west-1.amazonaws.com', - 'us-west-2': 'monitoring.us-west-2.amazonaws.com', - 'sa-east-1': 'monitoring.sa-east-1.amazonaws.com', - 'eu-west-1': 'monitoring.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'monitoring.ap-southeast-2.amazonaws.com', -} 
+RegionData = load_regions().get('cloudwatch', {}) def regions(): @@ -51,13 +41,7 @@ :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=CloudWatchConnection) - regions.append(region) - return regions + return get_regions('cloudwatch', connection_cls=CloudWatchConnection) def connect_to_region(region_name, **kw_params): @@ -90,7 +74,7 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2 Monitoring Service. @@ -107,14 +91,15 @@ if self.region.name == 'eu-west-1': validate_certs = False - AWSQueryConnection.__init__(self, aws_access_key_id, + super(CloudWatchConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -177,11 +162,11 @@ metric_data['StatisticValues.Minimum'] = s['minimum'] metric_data['StatisticValues.SampleCount'] = s['samplecount'] metric_data['StatisticValues.Sum'] = s['sum'] - if value != None: + if value is not None: msg = 'You supplied a value and statistics for a ' + \ 'metric.Posting statistics and not value.' boto.log.warn(msg) - elif value != None: + elif value is not None: metric_data['Value'] = v else: raise Exception('Must specify a value or statistics to put.') @@ -272,9 +257,13 @@ pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. 
The value in the dictionary is either a scalar value of that Dimension - name that you want to filter on, a list of values to - filter on or None if you want all metrics with that - Dimension name. + name that you want to filter on or None if you want all + metrics with that Dimension name. To be included in the + result a metric must contain all specified dimensions, + although the metric may contain additional dimensions beyond + the requested metrics. The Dimension names, and values must + be strings between 1 and 250 characters long. A maximum of + 10 dimensions are allowed. :type metric_name: str :param metric_name: The name of the Metric to filter against. If None, diff -Nru python-boto-2.20.1/boto/ec2/connection.py python-boto-2.29.1/boto/ec2/connection.py --- python-boto-2.20.1/boto/ec2/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,6 +31,7 @@ from datetime import timedelta import boto +from boto.auth import detect_potential_sigv4 from boto.connection import AWSQueryConnection from boto.resultset import ResultSet from boto.ec2.image import Image, ImageAttribute, CopyImage @@ -57,7 +58,7 @@ from boto.ec2.bundleinstance import BundleInstanceTask from boto.ec2.placementgroup import PlacementGroup from boto.ec2.tag import Tag -from boto.ec2.vmtype import VmType +from boto.ec2.instancetype import InstanceType from boto.ec2.instancestatus import InstanceStatusSet from boto.ec2.volumestatus import VolumeStatusSet from boto.ec2.networkinterface import NetworkInterface @@ -70,7 +71,7 @@ class EC2Connection(AWSQueryConnection): - APIVersion = boto.config.get('Boto', 'ec2_version', '2013-10-15') + APIVersion = boto.config.get('Boto', 'ec2_version', '2014-05-01') DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', 'ec2.us-east-1.amazonaws.com') @@ -82,7 +83,7 @@ proxy_user=None, 
proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2. """ @@ -90,23 +91,25 @@ region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(EC2Connection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) if api_version: self.APIVersion = api_version + @detect_potential_sigv4 def _required_auth_capability(self): return ['ec2'] def get_params(self): """ - Returns a dictionary containing the value of of all of the keyword + Returns a dictionary containing the value of all of the keyword arguments passed when constructing this connection. """ param_names = ['aws_access_key_id', 'aws_secret_access_key', @@ -262,7 +265,9 @@ architecture=None, kernel_id=None, ramdisk_id=None, root_device_name=None, block_device_map=None, dry_run=False, virtualization_type=None, - snapshot_id=None): + sriov_net_support=None, + snapshot_id=None, + delete_root_volume_on_termination=False): """ Register an image. @@ -301,11 +306,22 @@ * paravirtual * hvm + :type sriov_net_support: string + :param sriov_net_support: Advanced networking support. + Valid choices are: + * simple + :type snapshot_id: string :param snapshot_id: A snapshot ID for the snapshot to be used as root device for the image. Mutually exclusive with block_device_map, requires root_device_name - + + :type delete_root_volume_on_termination: bool + :param delete_root_volume_on_termination: Whether to delete the root + volume of the image after instance termination. 
Only applies when + creating image from snapshot_id. Defaults to False. Note that + leaving volumes behind after instance termination is not free. + :rtype: string :return: The new image id """ @@ -325,7 +341,8 @@ if root_device_name: params['RootDeviceName'] = root_device_name if snapshot_id: - root_vol = BlockDeviceType(snapshot_id=snapshot_id) + root_vol = BlockDeviceType(snapshot_id=snapshot_id, + delete_on_termination=delete_root_volume_on_termination) block_device_map = BlockDeviceMapping() block_device_map[root_device_name] = root_vol if block_device_map: @@ -334,7 +351,9 @@ params['DryRun'] = 'true' if virtualization_type: params['VirtualizationType'] = virtualization_type - + if sriov_net_support: + params['SriovNetSupport'] = sriov_net_support + rs = self.get_object('RegisterImage', params, ResultSet, verb='POST') image_id = getattr(rs, 'imageId', None) @@ -591,15 +610,24 @@ :rtype: list :return: A list of :class:`boto.ec2.instance.Instance` """ - reservations = self.get_all_reservations(instance_ids=instance_ids, - filters=filters, - dry_run=dry_run, - max_results=max_results) - return [instance for reservation in reservations - for instance in reservation.instances] + next_token = None + retval = [] + while True: + reservations = self.get_all_reservations(instance_ids=instance_ids, + filters=filters, + dry_run=dry_run, + max_results=max_results, + next_token=next_token) + retval.extend([instance for reservation in reservations for + instance in reservation.instances]) + next_token = reservations.next_token + if not next_token: + break + + return retval def get_all_reservations(self, instance_ids=None, filters=None, - dry_run=False, max_results=None): + dry_run=False, max_results=None, next_token=None): """ Retrieve all the instance reservations associated with your account. @@ -621,6 +649,10 @@ :param max_results: The maximum number of paginated instance items per response. 
+ :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. + :rtype: list :return: A list of :class:`boto.ec2.instance.Reservation` """ @@ -641,6 +673,8 @@ params['DryRun'] = 'true' if max_results is not None: params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token return self.get_list('DescribeInstances', params, [('item', Reservation)], verb='POST') @@ -724,8 +758,8 @@ launch instances. :type security_groups: list of strings - :param security_groups: The names of the security groups with which to - associate instances. + :param security_groups: The names of the EC2 classic security groups + with which to associate instances :type user_data: string :param user_data: The Base64-encoded MIME user data to be made @@ -739,6 +773,8 @@ * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium @@ -753,6 +789,11 @@ * cg1.4xlarge * cc2.8xlarge * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge * i2.xlarge * i2.2xlarge * i2.4xlarge @@ -1062,6 +1103,7 @@ * sourceDestCheck * groupSet * ebsOptimized + * sriovNetSupport :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. @@ -1171,6 +1213,7 @@ * sourceDestCheck - Boolean (true) * groupSet - Set of Security Groups or IDs * ebsOptimized - Boolean (false) + * sriovNetSupport - String - ie: 'simple' :type value: string :param value: The new value for the attribute @@ -1284,7 +1327,8 @@ def get_spot_price_history(self, start_time=None, end_time=None, instance_type=None, product_description=None, availability_zone=None, dry_run=False, - max_results=None): + max_results=None, next_token=None, + filters=None): """ Retrieve the recent history of spot instances pricing. @@ -1322,6 +1366,19 @@ :param max_results: The maximum number of paginated items per response. + :type next_token: str + :param next_token: The next set of rows to return. 
This should + be the value of the ``next_token`` attribute from a previous + call to ``get_spot_price_history``. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + :rtype: list :return: A list tuples containing price and timestamp. """ @@ -1340,6 +1397,10 @@ params['DryRun'] = 'true' if max_results is not None: params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) return self.get_list('DescribeSpotPriceHistory', params, [('item', SpotPriceHistory)], verb='POST') @@ -1407,6 +1468,8 @@ * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium @@ -1421,6 +1484,11 @@ * cg1.4xlarge * cc2.8xlarge * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge * i2.xlarge * i2.2xlarge * i2.4xlarge @@ -1792,6 +1860,37 @@ return self.get_status('AssignPrivateIpAddresses', params, verb='POST') + def _associate_address(self, status, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + elif network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + # Allocation id trumps public ip in order to associate with VPCs + if allocation_id is not None: + params['AllocationId'] = allocation_id + elif public_ip is not None: + params['PublicIp'] = public_ip + + if private_ip_address is not None: + params['PrivateIpAddress'] = private_ip_address + + if allow_reassociation: + params['AllowReassociation'] = 'true' + + if 
dry_run: + params['DryRun'] = 'true' + + if status: + return self.get_status('AssociateAddress', params, verb='POST') + else: + return self.get_object('AssociateAddress', params, Address, + verb='POST') + def associate_address(self, instance_id=None, public_ip=None, allocation_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False, @@ -1834,27 +1933,59 @@ :rtype: bool :return: True if successful """ - params = {} - if instance_id is not None: - params['InstanceId'] = instance_id - elif network_interface_id is not None: - params['NetworkInterfaceId'] = network_interface_id + return self._associate_address(True, instance_id=instance_id, + public_ip=public_ip, allocation_id=allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, dry_run=dry_run) - if public_ip is not None: - params['PublicIp'] = public_ip - elif allocation_id is not None: - params['AllocationId'] = allocation_id + def associate_address_object(self, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + """ + Associate an Elastic IP address with a currently running instance. + This requires one of ``public_ip`` or ``allocation_id`` depending + on if you're associating a VPC address or a plain EC2 address. - if private_ip_address is not None: - params['PrivateIpAddress'] = private_ip_address + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. - if allow_reassociation: - params['AllowReassociation'] = 'true' + :type instance_id: string + :param instance_id: The ID of the instance - if dry_run: - params['DryRun'] = 'true' + :type public_ip: string + :param public_ip: The public IP address for EC2 based allocations. 
+ + :type allocation_id: string + :param allocation_id: The allocation ID for a VPC-based elastic IP. + + :type network_interface_id: string + :param network_interface_id: The network interface ID to which + elastic IP is to be assigned to + + :type private_ip_address: string + :param private_ip_address: The primary or secondary private IP address + to associate with the Elastic IP address. + + :type allow_reassociation: bool + :param allow_reassociation: Specify this option to allow an Elastic IP + address that is already associated with another network interface + or instance to be re-associated with the specified instance or + interface. - return self.get_status('AssociateAddress', params, verb='POST') + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: class:`boto.ec2.address.Address` + :return: The associated address instance + """ + return self._associate_address(False, instance_id=instance_id, + public_ip=public_ip, allocation_id=allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, dry_run=dry_run) def disassociate_address(self, public_ip=None, association_id=None, dry_run=False): @@ -1875,10 +2006,12 @@ """ params = {} - if public_ip is not None: - params['PublicIp'] = public_ip - elif association_id is not None: + # If there is an association id it trumps public ip + # in order to successfully dissassociate with a VPC elastic ip + if association_id is not None: params['AssociationId'] = association_id + elif public_ip is not None: + params['PublicIp'] = public_ip if dry_run: params['DryRun'] = 'true' @@ -2113,8 +2246,8 @@ params['DryRun'] = 'true' return self.get_status('ModifyVolumeAttribute', params, verb='POST') - def create_volume(self, size, zone, snapshot=None, - volume_type=None, iops=None, dry_run=False): + def create_volume(self, size, zone, snapshot=None, volume_type=None, + iops=None, encrypted=False, 
dry_run=False): """ Create a new EBS Volume. @@ -2136,6 +2269,10 @@ :param iops: The provisioned IOPs you want to associate with this volume. (optional) + :type encrypted: bool + :param encrypted: Specifies whether the volume should be encrypted. + (optional) + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. @@ -2153,6 +2290,8 @@ params['VolumeType'] = volume_type if iops: params['Iops'] = str(iops) + if encrypted: + params['Encrypted'] = 'true' if dry_run: params['DryRun'] = 'true' return self.get_object('CreateVolume', params, Volume, verb='POST') @@ -2726,7 +2865,7 @@ def import_key_pair(self, key_name, public_key_material, dry_run=False): """ - mports the public key from an RSA key pair that you created + imports the public key from an RSA key pair that you created with a third-party tool. Supported formats: @@ -4214,15 +4353,15 @@ params['DryRun'] = 'true' return self.get_status('DeleteNetworkInterface', params, verb='POST') - def get_all_vmtypes(self): + def get_all_instance_types(self): """ - Get all vmtypes available on this cloud (eucalyptus specific) + Get all instance_types available on this cloud (eucalyptus specific) - :rtype: list of :class:`boto.ec2.vmtype.VmType` - :return: The requested VmType objects + :rtype: list of :class:`boto.ec2.instancetype.InstanceType` + :return: The requested InstanceType objects """ params = {} - return self.get_list('DescribeVmTypes', params, [('euca:item', VmType)], verb='POST') + return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST') def copy_image(self, source_region, source_image_id, name=None, description=None, client_token=None, dry_run=False): diff -Nru python-boto-2.20.1/boto/ec2/ec2object.py python-boto-2.29.1/boto/ec2/ec2object.py --- python-boto-2.20.1/boto/ec2/ec2object.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/ec2object.py 2014-05-30 20:49:34.000000000 +0000 @@ -53,7 +53,7 @@ """ def __init__(self, 
connection=None): - EC2Object.__init__(self, connection) + super(TaggedEC2Object, self).__init__(connection) self.tags = TagSet() def startElement(self, name, attrs, connection): diff -Nru python-boto-2.20.1/boto/ec2/elb/attributes.py python-boto-2.29.1/boto/ec2/elb/attributes.py --- python-boto-2.20.1/boto/ec2/elb/attributes.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/elb/attributes.py 2014-05-30 20:49:34.000000000 +0000 @@ -40,6 +40,66 @@ else: self.enabled = False +class AccessLogAttribute(object): + """ + Represents the AccessLog segment of ELB attributes. + """ + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return 'AccessLog(%s, %s, %s, %s)' % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'S3BucketName': + self.s3_bucket_name = value + elif name == 'S3BucketPrefix': + self.s3_bucket_prefix = value + elif name == 'EmitInterval': + self.emit_interval = int(value) + +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. + """ + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return 'ConnectionDraining(%s, %s)' % ( + self.enabled, + self.timeout + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'Timeout': + self.timeout = int(value) + class LbAttributes(object): """ Represents the Attributes of an Elastic Load Balancer. 
@@ -48,14 +108,22 @@ self.connection = connection self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( self.connection) + self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) def __repr__(self): - return 'LbAttributes(%s)' % ( - repr(self.cross_zone_load_balancing)) + return 'LbAttributes(%s, %s, %s)' % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log), + repr(self.connection_draining)) def startElement(self, name, attrs, connection): if name == 'CrossZoneLoadBalancing': return self.cross_zone_load_balancing - + if name == 'AccessLog': + return self.access_log + if name == 'ConnectionDraining': + return self.connection_draining + def endElement(self, name, value, connection): pass diff -Nru python-boto-2.20.1/boto/ec2/elb/__init__.py python-boto-2.29.1/boto/ec2/elb/__init__.py --- python-boto-2.20.1/boto/ec2/elb/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/elb/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,20 +31,10 @@ from boto.ec2.elb.instancestate import InstanceState from boto.ec2.elb.healthcheck import HealthCheck from boto.ec2.elb.listelement import ListElement -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions import boto -RegionData = { - 'us-east-1': 'elasticloadbalancing.us-east-1.amazonaws.com', - 'us-gov-west-1': 'elasticloadbalancing.us-gov-west-1.amazonaws.com', - 'us-west-1': 'elasticloadbalancing.us-west-1.amazonaws.com', - 'us-west-2': 'elasticloadbalancing.us-west-2.amazonaws.com', - 'sa-east-1': 'elasticloadbalancing.sa-east-1.amazonaws.com', - 'eu-west-1': 'elasticloadbalancing.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'elasticloadbalancing.ap-southeast-2.amazonaws.com', -} +RegionData = 
load_regions().get('elasticloadbalancing', {}) def regions(): @@ -54,13 +44,7 @@ :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=ELBConnection) - regions.append(region) - return regions + return get_regions('elasticloadbalancing', connection_cls=ELBConnection) def connect_to_region(region_name, **kw_params): @@ -91,7 +75,7 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2 Load Balancing Service. @@ -102,20 +86,21 @@ region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(ELBConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['ec2'] def build_list_params(self, params, items, label): - if isinstance(items, str): + if isinstance(items, basestring): items = [items] for index, item in enumerate(items): params[label % (index + 1)] = item @@ -400,6 +385,8 @@ :param attribute: The attribute you wish to change. 
* crossZoneLoadBalancing - Boolean (true) + * accessLog - :py:class:`AccessLogAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :type value: string :param value: The new value for the attribute @@ -420,6 +407,20 @@ if attribute.lower() == 'crosszoneloadbalancing': params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled' ] = value + elif attribute.lower() == 'accesslog': + params['LoadBalancerAttributes.AccessLog.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \ + value.s3_bucket_name + params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \ + value.s3_bucket_prefix + params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ + value.emit_interval + elif attribute.lower() == 'connectiondraining': + params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \ + value.timeout else: raise ValueError('InvalidAttribute', attribute) return self.get_status('ModifyLoadBalancerAttributes', params, @@ -450,14 +451,20 @@ :type attribute: string :param attribute: The attribute you wish to see. 
+ * accessLog - :py:class:`AccessLogAttribute` instance * crossZoneLoadBalancing - Boolean + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :rtype: Attribute dependent :return: The new value for the attribute """ attributes = self.get_all_lb_attributes(load_balancer_name) + if attribute.lower() == 'accesslog': + return attributes.access_log if attribute.lower() == 'crosszoneloadbalancing': return attributes.cross_zone_load_balancing.enabled + if attribute.lower() == 'connectiondraining': + return attributes.connection_draining return None def register_instances(self, load_balancer_name, instances): @@ -608,7 +615,7 @@ def create_lb_policy(self, lb_name, policy_name, policy_type, policy_attributes): """ - Creates a new policy that contais the necessary attributes depending on + Creates a new policy that contains the necessary attributes depending on the policy type. Policies are settings that are saved for your load balancer and that can be applied to the front-end listener, or the back-end application server. @@ -640,7 +647,10 @@ """ params = {'LoadBalancerName': lb_name, 'LoadBalancerPort': lb_port} - self.build_list_params(params, policies, 'PolicyNames.member.%d') + if len(policies): + self.build_list_params(params, policies, 'PolicyNames.member.%d') + else: + params['PolicyNames'] = '' return self.get_status('SetLoadBalancerPoliciesOfListener', params) def set_lb_policies_of_backend_server(self, lb_name, instance_port, policies): diff -Nru python-boto-2.20.1/boto/ec2/elb/loadbalancer.py python-boto-2.29.1/boto/ec2/elb/loadbalancer.py --- python-boto-2.20.1/boto/ec2/elb/loadbalancer.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/elb/loadbalancer.py 2014-05-30 20:49:34.000000000 +0000 @@ -82,6 +82,7 @@ check policy for this load balancer. :ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and other policies. + :ivar str name: The name of the Load Balancer. 
:ivar str dns_name: The external DNS name for the balancer. :ivar str created_time: A date+time string showing when the load balancer was created. @@ -186,7 +187,7 @@ :param zones: The name of the zone(s) to add. """ - if isinstance(zones, str) or isinstance(zones, unicode): + if isinstance(zones, basestring): zones = [zones] new_zones = self.connection.enable_availability_zones(self.name, zones) self.availability_zones = new_zones @@ -199,7 +200,7 @@ :param zones: The name of the zone(s) to add. """ - if isinstance(zones, str) or isinstance(zones, unicode): + if isinstance(zones, basestring): zones = [zones] new_zones = self.connection.disable_availability_zones(self.name, zones) self.availability_zones = new_zones @@ -266,7 +267,7 @@ to add to this load balancer. """ - if isinstance(instances, str) or isinstance(instances, unicode): + if isinstance(instances, basestring): instances = [instances] new_instances = self.connection.register_instances(self.name, instances) @@ -281,7 +282,7 @@ to remove from this load balancer. """ - if isinstance(instances, str) or isinstance(instances, unicode): + if isinstance(instances, basestring): instances = [instances] new_instances = self.connection.deregister_instances(self.name, instances) @@ -324,7 +325,7 @@ listeners) def create_listener(self, inPort, outPort=None, proto="tcp"): - if outPort == None: + if outPort is None: outPort = inPort return self.create_listeners([(inPort, outPort, proto)]) @@ -380,7 +381,7 @@ :param subnets: The name of the subnet(s) to add. """ - if isinstance(subnets, str) or isinstance(subnets, unicode): + if isinstance(subnets, basestring): subnets = [subnets] new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets) self.subnets = new_subnets @@ -393,7 +394,7 @@ :param subnets: The name of the subnet(s) to detach. 
""" - if isinstance(subnets, str) or isinstance(subnets, unicode): + if isinstance(subnets, basestring): subnets = [subnets] new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets) self.subnets = new_subnets @@ -408,8 +409,7 @@ :param security_groups: The name of the security group(s) to add. """ - if isinstance(security_groups, str) or \ - isinstance(security_groups, unicode): + if isinstance(security_groups, basestring): security_groups = [security_groups] new_sgs = self.connection.apply_security_groups_to_lb( self.name, security_groups) diff -Nru python-boto-2.20.1/boto/ec2/group.py python-boto-2.29.1/boto/ec2/group.py --- python-boto-2.20.1/boto/ec2/group.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/group.py 2014-05-30 20:49:34.000000000 +0000 @@ -15,13 +15,12 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-class Group: - +class Group(object): def __init__(self, parent=None): self.id = None self.name = None @@ -36,4 +35,4 @@ self.name = value else: setattr(self, name, value) - + diff -Nru python-boto-2.20.1/boto/ec2/image.py python-boto-2.29.1/boto/ec2/image.py --- python-boto-2.20.1/boto/ec2/image.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/image.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,8 +23,8 @@ from boto.ec2.ec2object import EC2Object, TaggedEC2Object from boto.ec2.blockdevicemapping import BlockDeviceMapping -class ProductCodes(list): +class ProductCodes(list): def startElement(self, name, attrs, connection): pass @@ -32,8 +32,8 @@ if name == 'productCode': self.append(value) -class BillingProducts(list): +class BillingProducts(list): def startElement(self, name, attrs, connection): pass @@ -47,7 +47,7 @@ """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(Image, self).__init__(connection) self.id = None self.location = None self.state = None @@ -70,12 +70,13 @@ self.virtualization_type = None self.hypervisor = None self.instance_lifecycle = None + self.sriov_net_support = None def __repr__(self): return 'Image:%s' % self.id def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(Image, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'blockDeviceMapping': @@ -136,6 +137,8 @@ self.hypervisor = value elif name == 'instanceLifecycle': self.instance_lifecycle = value + elif name == 'sriovNetSupport': + self.sriov_net_support = value else: setattr(self, name, value) @@ -205,6 +208,8 @@ * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium @@ -219,6 +224,11 @@ * cg1.4xlarge * cc2.8xlarge * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge * i2.xlarge * i2.2xlarge * i2.4xlarge @@ -370,8 +380,8 @@ 
) return img_attrs.ramdisk -class ImageAttribute: +class ImageAttribute(object): def __init__(self, parent=None): self.name = None self.kernel = None diff -Nru python-boto-2.20.1/boto/ec2/__init__.py python-boto-2.29.1/boto/ec2/__init__.py --- python-boto-2.20.1/boto/ec2/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -24,20 +24,10 @@ service from AWS. """ from boto.ec2.connection import EC2Connection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions -RegionData = { - 'us-east-1': 'ec2.us-east-1.amazonaws.com', - 'us-gov-west-1': 'ec2.us-gov-west-1.amazonaws.com', - 'us-west-1': 'ec2.us-west-1.amazonaws.com', - 'us-west-2': 'ec2.us-west-2.amazonaws.com', - 'sa-east-1': 'ec2.sa-east-1.amazonaws.com', - 'eu-west-1': 'ec2.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'ec2.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'ec2.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'ec2.ap-southeast-2.amazonaws.com', -} +RegionData = load_regions().get('ec2', {}) def regions(**kw_params): @@ -50,13 +40,7 @@ :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=EC2Connection) - regions.append(region) - return regions + return get_regions('ec2', connection_cls=EC2Connection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/ec2/instance.py python-boto-2.29.1/boto/ec2/instance.py --- python-boto-2.20.1/boto/ec2/instance.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/instance.py 2014-05-30 20:49:34.000000000 +0000 @@ -122,7 +122,7 @@ Reservation. 
""" def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(Reservation, self).__init__(connection) self.id = None self.owner_id = None self.groups = [] @@ -211,7 +211,7 @@ """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(Instance, self).__init__(connection) self.id = None self.dns_name = None self.public_dns_name = None @@ -288,7 +288,7 @@ return self._placement.tenancy def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(Instance, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'monitoring': @@ -606,8 +606,7 @@ ) -class ConsoleOutput: - +class ConsoleOutput(object): def __init__(self, parent=None): self.parent = parent self.instance_id = None @@ -629,7 +628,6 @@ class InstanceAttribute(dict): - ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination', 'instanceInitiatedShutdownBehavior', @@ -668,7 +666,6 @@ class SubParse(dict): - def __init__(self, section, parent=None): dict.__init__(self) self.section = section diff -Nru python-boto-2.20.1/boto/ec2/instancetype.py python-boto-2.29.1/boto/ec2/instancetype.py --- python-boto-2.20.1/boto/ec2/instancetype.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/instancetype.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,59 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice 
and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, + memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores, + self.memory, self.disk) + + def endElement(self, name, value, connection): + if name == 'name': + self.name = value + elif name == 'cpu': + self.cores = value + elif name == 'disk': + self.disk = value + elif name == 'memory': + self.memory = value + else: + setattr(self, name, value) diff -Nru python-boto-2.20.1/boto/ec2/keypair.py python-boto-2.29.1/boto/ec2/keypair.py --- python-boto-2.20.1/boto/ec2/keypair.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/keypair.py 2014-05-30 20:49:34.000000000 +0000 @@ -30,7 +30,7 @@ class KeyPair(EC2Object): def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(KeyPair, self).__init__(connection) self.name = None self.fingerprint = None 
self.material = None diff -Nru python-boto-2.20.1/boto/ec2/launchspecification.py python-boto-2.29.1/boto/ec2/launchspecification.py --- python-boto-2.20.1/boto/ec2/launchspecification.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/launchspecification.py 2014-05-30 20:49:34.000000000 +0000 @@ -44,7 +44,7 @@ class LaunchSpecification(EC2Object): def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(LaunchSpecification, self).__init__(connection) self.key_name = None self.instance_type = None self.image_id = None diff -Nru python-boto-2.20.1/boto/ec2/networkinterface.py python-boto-2.29.1/boto/ec2/networkinterface.py --- python-boto-2.20.1/boto/ec2/networkinterface.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/networkinterface.py 2014-05-30 20:49:34.000000000 +0000 @@ -99,7 +99,7 @@ """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(NetworkInterface, self).__init__(connection) self.id = None self.subnet_id = None self.vpc_id = None @@ -119,7 +119,8 @@ return 'NetworkInterface:%s' % self.id def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(NetworkInterface, self).startElement(name, attrs, + connection) if retval is not None: return retval if name == 'groupSet': diff -Nru python-boto-2.20.1/boto/ec2/placementgroup.py python-boto-2.29.1/boto/ec2/placementgroup.py --- python-boto-2.20.1/boto/ec2/placementgroup.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/placementgroup.py 2014-05-30 20:49:34.000000000 +0000 @@ -27,7 +27,7 @@ class PlacementGroup(EC2Object): def __init__(self, connection=None, name=None, strategy=None, state=None): - EC2Object.__init__(self, connection) + super(PlacementGroup, self).__init__(connection) self.name = name self.strategy = strategy self.state = state diff -Nru python-boto-2.20.1/boto/ec2/regioninfo.py 
python-boto-2.29.1/boto/ec2/regioninfo.py --- python-boto-2.20.1/boto/ec2/regioninfo.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -16,7 +16,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -27,8 +27,9 @@ """ Represents an EC2 Region """ - - def __init__(self, connection=None, name=None, endpoint=None): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.ec2.connection import EC2Connection - RegionInfo.__init__(self, connection, name, endpoint, + super(EC2RegionInfo, self).__init__(connection, name, endpoint, EC2Connection) diff -Nru python-boto-2.20.1/boto/ec2/reservedinstance.py python-boto-2.29.1/boto/ec2/reservedinstance.py --- python-boto-2.20.1/boto/ec2/reservedinstance.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/reservedinstance.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,7 +31,7 @@ usage_price=None, description=None, instance_tenancy=None, currency_code=None, offering_type=None, recurring_charges=None, pricing_details=None): - EC2Object.__init__(self, connection) + super(ReservedInstancesOffering, self).__init__(connection) self.id = id self.instance_type = instance_type self.availability_zone = availability_zone @@ -128,9 +128,10 @@ availability_zone=None, duration=None, fixed_price=None, usage_price=None, description=None, instance_count=None, state=None): - ReservedInstancesOffering.__init__(self, connection, id, 
instance_type, - availability_zone, duration, fixed_price, - usage_price, description) + super(ReservedInstance, self).__init__(connection, id, instance_type, + availability_zone, duration, + fixed_price, usage_price, + description) self.instance_count = instance_count self.state = state self.start = None @@ -148,7 +149,7 @@ elif name == 'start': self.start = value else: - ReservedInstancesOffering.endElement(self, name, value, connection) + super(ReservedInstance, self).endElement(name, value, connection) class ReservedInstanceListing(EC2Object): diff -Nru python-boto-2.20.1/boto/ec2/securitygroup.py python-boto-2.29.1/boto/ec2/securitygroup.py --- python-boto-2.20.1/boto/ec2/securitygroup.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/securitygroup.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,7 +31,7 @@ def __init__(self, connection=None, owner_id=None, name=None, description=None, id=None): - TaggedEC2Object.__init__(self, connection) + super(SecurityGroup, self).__init__(connection) self.id = id self.owner_id = owner_id self.name = name @@ -44,7 +44,8 @@ return 'SecurityGroup:%s' % self.name def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(SecurityGroup, self).startElement(name, attrs, + connection) if retval is not None: return retval if name == 'ipPermissions': diff -Nru python-boto-2.20.1/boto/ec2/snapshot.py python-boto-2.29.1/boto/ec2/snapshot.py --- python-boto-2.20.1/boto/ec2/snapshot.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/snapshot.py 2014-05-30 20:49:34.000000000 +0000 @@ -26,12 +26,12 @@ from boto.ec2.ec2object import TaggedEC2Object from boto.ec2.zone import Zone -class Snapshot(TaggedEC2Object): +class Snapshot(TaggedEC2Object): AttrName = 'createVolumePermission' def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(Snapshot, self).__init__(connection) self.id = None 
self.volume_id = None self.status = None @@ -41,6 +41,7 @@ self.owner_alias = None self.volume_size = None self.description = None + self.encrypted = None def __repr__(self): return 'Snapshot:%s' % self.id @@ -65,6 +66,8 @@ self.volume_size = value elif name == 'description': self.description = value + elif name == 'encrypted': + self.encrypted = (value.lower() == 'true') else: setattr(self, name, value) @@ -152,12 +155,12 @@ self.id, volume_type, iops, + self.encrypted, dry_run=dry_run ) -class SnapshotAttribute: - +class SnapshotAttribute(object): def __init__(self, parent=None): self.snapshot_id = None self.attrs = {} diff -Nru python-boto-2.20.1/boto/ec2/spotdatafeedsubscription.py python-boto-2.29.1/boto/ec2/spotdatafeedsubscription.py --- python-boto-2.20.1/boto/ec2/spotdatafeedsubscription.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/spotdatafeedsubscription.py 2014-05-30 20:49:34.000000000 +0000 @@ -29,7 +29,7 @@ def __init__(self, connection=None, owner_id=None, bucket=None, prefix=None, state=None,fault=None): - EC2Object.__init__(self, connection) + super(SpotDatafeedSubscription, self).__init__(connection) self.owner_id = owner_id self.bucket = bucket self.prefix = prefix diff -Nru python-boto-2.20.1/boto/ec2/spotinstancerequest.py python-boto-2.29.1/boto/ec2/spotinstancerequest.py --- python-boto-2.20.1/boto/ec2/spotinstancerequest.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/spotinstancerequest.py 2014-05-30 20:49:34.000000000 +0000 @@ -120,7 +120,7 @@ """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(SpotInstanceRequest, self).__init__(connection) self.id = None self.price = None self.type = None @@ -141,7 +141,8 @@ return 'SpotInstanceRequest:%s' % self.id def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(SpotInstanceRequest, self).startElement(name, attrs, + 
connection) if retval is not None: return retval if name == 'launchSpecification': diff -Nru python-boto-2.20.1/boto/ec2/spotpricehistory.py python-boto-2.29.1/boto/ec2/spotpricehistory.py --- python-boto-2.20.1/boto/ec2/spotpricehistory.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/spotpricehistory.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -26,9 +26,9 @@ from boto.ec2.ec2object import EC2Object class SpotPriceHistory(EC2Object): - + def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(SpotPriceHistory, self).__init__(connection) self.price = 0.0 self.instance_type = None self.product_description = None diff -Nru python-boto-2.20.1/boto/ec2/vmtype.py python-boto-2.29.1/boto/ec2/vmtype.py --- python-boto-2.20.1/boto/ec2/vmtype.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/vmtype.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# 
-# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. - - -from boto.ec2.ec2object import EC2Object - - -class VmType(EC2Object): - """ - Represents an EC2 VM Type - - :ivar name: The name of the vm type - :ivar cores: The number of cpu cores for this vm type - :ivar memory: The amount of memory in megabytes for this vm type - :ivar disk: The amount of disk space in gigabytes for this vm type - """ - - def __init__(self, connection=None, name=None, cores=None, - memory=None, disk=None): - EC2Object.__init__(self, connection) - self.connection = connection - self.name = name - self.cores = cores - self.memory = memory - self.disk = disk - - def __repr__(self): - return 'VmType:%s-%s,%s,%s' % (self.name, self.cores, - self.memory, self.disk) - - def endElement(self, name, value, connection): - if name == 'euca:name': - self.name = value - elif name == 'euca:cpu': - self.cores = value - elif name == 'euca:disk': - self.disk = value - elif name == 'euca:memory': - self.memory = value - else: - setattr(self, name, value) diff -Nru python-boto-2.20.1/boto/ec2/volume.py python-boto-2.29.1/boto/ec2/volume.py --- python-boto-2.20.1/boto/ec2/volume.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/volume.py 2014-05-30 20:49:34.000000000 +0000 @@ -44,10 +44,11 @@ :ivar type: The type of volume (standard or consistent-iops) :ivar iops: If this volume is of type consistent-iops, this is the number of IOPS provisioned 
(10-300). + :ivar encrypted: True if this volume is encrypted. """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(Volume, self).__init__(connection) self.id = None self.create_time = None self.status = None @@ -57,12 +58,13 @@ self.zone = None self.type = None self.iops = None + self.encrypted = None def __repr__(self): return 'Volume:%s' % self.id def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(Volume, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'attachmentSet': @@ -92,6 +94,8 @@ self.type = value elif name == 'iops': self.iops = int(value) + elif name == 'encrypted': + self.encrypted = (value.lower() == 'true') else: setattr(self, name, value) @@ -260,7 +264,6 @@ :ivar attach_time: Attached since :ivar device: The device the instance has mapped """ - def __init__(self): self.id = None self.instance_id = None @@ -289,8 +292,7 @@ setattr(self, name, value) -class VolumeAttribute: - +class VolumeAttribute(object): def __init__(self, parent=None): self.id = None self._key_name = None diff -Nru python-boto-2.20.1/boto/ec2/zone.py python-boto-2.29.1/boto/ec2/zone.py --- python-boto-2.20.1/boto/ec2/zone.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ec2/zone.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -38,7 +38,7 @@ self.append(value) else: setattr(self, name, value) - + class Zone(EC2Object): """ Represents an Availability Zone. @@ -48,9 +48,9 @@ :ivar region_name: The name of the region the zone is associated with. :ivar messages: A list of messages related to the zone. """ - + def __init__(self, connection=None): - EC2Object.__init__(self, connection) + super(Zone, self).__init__(connection) self.name = None self.state = None self.region_name = None @@ -64,7 +64,7 @@ self.messages = MessageSet() return self.messages return None - + def endElement(self, name, value, connection): if name == 'zoneName': self.name = value diff -Nru python-boto-2.20.1/boto/ecs/__init__.py python-boto-2.29.1/boto/ecs/__init__.py --- python-boto-2.20.1/boto/ecs/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ecs/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,13 +14,14 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import boto from boto.connection import AWSQueryConnection, AWSAuthConnection +from boto.exception import BotoServerError import time import urllib import xml.sax @@ -41,10 +42,13 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com', - debug=0, https_connection_factory=None, path='/'): - AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + debug=0, https_connection_factory=None, path='/', + security_token=None, profile_name=None): + super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, - host, debug, https_connection_factory, path) + host, debug, https_connection_factory, path, + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): return ['ecs'] @@ -64,27 +68,38 @@ if response.status != 200: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise BotoServerError(response.status, response.reason, body) - if itemSet == None: + if itemSet is None: rs = ItemSet(self, action, params, page) else: rs = itemSet h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) + if not rs.is_valid: + raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0])) return rs # # Group methods # - + def item_search(self, search_index, **params): """ - Returns items that satisfy the search criteria, including one or more search + Returns items that satisfy the search criteria, including one or more search indices. 
- For a full list of search terms, + For a full list of search terms, :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html """ params['SearchIndex'] = search_index return self.get_response('ItemSearch', params) + + def item_lookup(self, **params): + """ + Returns items that satisfy the lookup query. + + For a full list of parameters, see: + http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf + """ + return self.get_response('ItemLookup', params) \ No newline at end of file diff -Nru python-boto-2.20.1/boto/ecs/item.py python-boto-2.29.1/boto/ecs/item.py --- python-boto-2.20.1/boto/ecs/item.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ecs/item.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -26,7 +26,7 @@ class ResponseGroup(xml.sax.ContentHandler): """A Generic "Response Group", which can - be anything from the entire list of Items to + be anything from the entire list of Items to specific response elements within an item""" def __init__(self, connection=None, nodename=None): @@ -45,7 +45,7 @@ # def get(self, name): return self.__dict__.get(name) - + def set(self, name, value): self.__dict__[name] = value @@ -106,11 +106,13 @@ self.curItem = None self.total_results = 0 self.total_pages = 0 + self.is_valid = False + self.errors = [] def startElement(self, name, attrs, connection): if name == "Item": self.curItem = Item(self._connection) - elif self.curItem != None: + elif self.curItem is not None: self.curItem.startElement(name, attrs, connection) return None @@ -119,17 +121,24 @@ self.total_results = value elif name == 'TotalPages': self.total_pages = value - elif name == "Item": + elif name == 'IsValid': + if value == 'True': + self.is_valid = True + elif name == 'Code': + self.errors.append({'Code': value, 'Message': None}) + elif name == 'Message': + self.errors[-1]['Message'] = value + elif name == 'Item': self.objs.append(self.curItem) self._xml.write(self.curItem.to_xml()) self.curItem = None - elif self.curItem != None: + elif self.curItem is not None: self.curItem.endElement(name, value, connection) return None def next(self): """Special paging functionality""" - if self.iter == None: + if self.iter is None: self.iter = iter(self.objs) try: return self.iter.next() diff -Nru python-boto-2.20.1/boto/elasticache/__init__.py python-boto-2.29.1/boto/elasticache/__init__.py --- python-boto-2.20.1/boto/elasticache/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/elasticache/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,31 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.elasticache.layer1 import ElastiCacheConnection - return [RegionInfo(name='us-east-1', - endpoint='elasticache.us-east-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='us-west-1', - endpoint='elasticache.us-west-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='us-west-2', - endpoint='elasticache.us-west-2.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='eu-west-1', - endpoint='elasticache.eu-west-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-northeast-1', - endpoint='elasticache.ap-northeast-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-southeast-1', - endpoint='elasticache.ap-southeast-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-southeast-2', - endpoint='elasticache.ap-southeast-2.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='sa-east-1', - endpoint='elasticache.sa-east-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - ] + return get_regions('elasticache', connection_cls=ElastiCacheConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/elasticache/layer1.py python-boto-2.29.1/boto/elasticache/layer1.py --- python-boto-2.20.1/boto/elasticache/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/elasticache/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -55,12 +55,12 @@ else: del kwargs['region'] kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(ElastiCacheConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): - return ['sign-v2'] + return ['hmac-v4'] def authorize_cache_security_group_ingress(self, 
cache_security_group_name, diff -Nru python-boto-2.20.1/boto/elastictranscoder/__init__.py python-boto-2.29.1/boto/elastictranscoder/__init__.py --- python-boto-2.20.1/boto/elastictranscoder/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/elastictranscoder/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,27 +31,10 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.elastictranscoder.layer1 import ElasticTranscoderConnection - cls = ElasticTranscoderConnection - return [ - RegionInfo(name='us-east-1', - endpoint='elastictranscoder.us-east-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-1', - endpoint='elastictranscoder.us-west-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-2', - endpoint='elastictranscoder.us-west-2.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-northeast-1', - endpoint='elastictranscoder.ap-northeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-1', - endpoint='elastictranscoder.ap-southeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='eu-west-1', - endpoint='elastictranscoder.eu-west-1.amazonaws.com', - connection_cls=cls), - ] + return get_regions( + 'elastictranscoder', + connection_cls=ElasticTranscoderConnection + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/elastictranscoder/layer1.py python-boto-2.29.1/boto/elastictranscoder/layer1.py --- python-boto-2.20.1/boto/elastictranscoder/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/elastictranscoder/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -55,7 +55,7 @@ else: del kwargs['region'] kwargs['host'] = region.endpoint - AWSAuthConnection.__init__(self, **kwargs) + 
super(ElasticTranscoderConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): @@ -523,26 +523,56 @@ return self.make_request('GET', uri, expected_status=200, params=params) - def list_pipelines(self): + def list_pipelines(self, ascending=None, page_token=None): """ The ListPipelines operation gets a list of the pipelines associated with the current AWS account. - + :type ascending: string + :param ascending: To list pipelines in chronological order by the date + and time that they were created, enter `True`. To list pipelines in + reverse chronological order, enter `False`. + + :type page_token: string + :param page_token: When Elastic Transcoder returns more than one page + of results, use `pageToken` in subsequent `GET` requests to get + each successive page of results. + """ - uri = '/2012-09-25/pipelines' - return self.make_request('GET', uri, expected_status=200) + uri = '/2012-09-25/pipelines'.format() + params = {} + if ascending is not None: + params['Ascending'] = ascending + if page_token is not None: + params['PageToken'] = page_token + return self.make_request('GET', uri, expected_status=200, + params=params) - def list_presets(self): + def list_presets(self, ascending=None, page_token=None): """ The ListPresets operation gets a list of the default presets included with Elastic Transcoder and the presets that you've added in an AWS region. - + :type ascending: string + :param ascending: To list presets in chronological order by the date + and time that they were created, enter `True`. To list presets in + reverse chronological order, enter `False`. + + :type page_token: string + :param page_token: When Elastic Transcoder returns more than one page + of results, use `pageToken` in subsequent `GET` requests to get + each successive page of results. 
+ """ - uri = '/2012-09-25/presets' - return self.make_request('GET', uri, expected_status=200) + uri = '/2012-09-25/presets'.format() + params = {} + if ascending is not None: + params['Ascending'] = ascending + if page_token is not None: + params['PageToken'] = page_token + return self.make_request('GET', uri, expected_status=200, + params=params) def read_job(self, id=None): """ @@ -891,8 +921,8 @@ expected_status=None, params=None): if headers is None: headers = {} - response = AWSAuthConnection.make_request( - self, verb, resource, headers=headers, data=data) + response = super(ElasticTranscoderConnection, self).make_request( + verb, resource, headers=headers, data=data) body = json.load(response) if response.status == expected_status: return body diff -Nru python-boto-2.20.1/boto/emr/connection.py python-boto-2.29.1/boto/emr/connection.py --- python-boto-2.20.1/boto/emr/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/emr/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -55,19 +55,20 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(EmrConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) # Many of the EMR hostnames are of the form: # ..amazonaws.com # rather than the more common: @@ -265,7 +266,43 @@ if step_states: self.build_list_params(params, step_states, 'StepStateList.member') - 
self.get_object('ListSteps', params, StepSummaryList) + return self.get_object('ListSteps', params, StepSummaryList) + + def add_tags(self, resource_id, tags): + """ + Create new metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: dict + :param tags: A dictionary containing the name/value pairs. + If you want to create only a tag name, the + value for that tag should be the empty string + (e.g. '') or None. + """ + assert isinstance(resource_id, basestring) + params = { + 'ResourceId': resource_id, + } + params.update(self._build_tag_list(tags)) + return self.get_status('AddTags', params, verb='POST') + + def remove_tags(self, resource_id, tags): + """ + Remove metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: list + :param tags: A list of tag names to remove. + """ + params = { + 'ResourceId': resource_id, + } + params.update(self._build_string_list('TagKeys', tags)) + return self.get_status('RemoveTags', params, verb='POST') def terminate_jobflow(self, jobflow_id): """ @@ -623,6 +660,27 @@ params['Steps.member.%s.%s' % (i+1, key)] = value return params + def _build_string_list(self, field, items): + if not isinstance(items, types.ListType): + items = [items] + + params = {} + for i, item in enumerate(items): + params['%s.member.%s' % (field, i + 1)] = item + return params + + def _build_tag_list(self, tags): + assert isinstance(tags, dict) + + params = {} + for i, key_value in enumerate(sorted(tags.iteritems()), start=1): + key, value = key_value + current_prefix = 'Tags.member.%s' % i + params['%s.Key' % current_prefix] = key + if value: + params['%s.Value' % current_prefix] = value + return params + def _build_instance_common_args(self, ec2_keyname, availability_zone, keep_alive, hadoop_version): """ diff -Nru python-boto-2.20.1/boto/emr/emrobject.py python-boto-2.29.1/boto/emr/emrobject.py --- 
python-boto-2.20.1/boto/emr/emrobject.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/emr/emrobject.py 2014-05-30 20:49:34.000000000 +0000 @@ -256,16 +256,21 @@ self.status = None self.ec2instanceattributes = None self.applications = None + self.tags = None def startElement(self, name, attrs, connection): if name == 'Status': self.status = ClusterStatus() return self.status - elif name == 'EC2InstanceAttributes': + elif name == 'Ec2InstanceAttributes': self.ec2instanceattributes = Ec2InstanceAttributes() return self.ec2instanceattributes elif name == 'Applications': self.applications = ResultSet([('member', Application)]) + return self.applications + elif name == 'Tags': + self.tags = ResultSet([('member', KeyValue)]) + return self.tags else: return None diff -Nru python-boto-2.20.1/boto/emr/__init__.py python-boto-2.29.1/boto/emr/__init__.py --- python-boto-2.20.1/boto/emr/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/emr/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -29,7 +29,7 @@ from connection import EmrConnection from step import Step, StreamingStep, JarStep from bootstrap_action import BootstrapAction -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -39,31 +39,7 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` """ - return [RegionInfo(name='us-east-1', - endpoint='elasticmapreduce.us-east-1.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='us-west-1', - endpoint='us-west-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='us-west-2', - endpoint='us-west-2.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='ap-northeast-1', - endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='ap-southeast-1', - endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com', - 
connection_cls=EmrConnection), - RegionInfo(name='ap-southeast-2', - endpoint='ap-southeast-2.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='eu-west-1', - endpoint='eu-west-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='sa-east-1', - endpoint='sa-east-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - ] + return get_regions('elasticmapreduce', connection_cls=EmrConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/emr/step.py python-boto-2.29.1/boto/emr/step.py --- python-boto-2.20.1/boto/emr/step.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/emr/step.py 2014-05-30 20:49:34.000000000 +0000 @@ -204,7 +204,7 @@ ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar' def __init__(self, name, **kw): - JarStep.__init__(self, name, self.ScriptRunnerJar, **kw) + super(ScriptRunnerStep, self).__init__(name, self.ScriptRunnerJar, **kw) class PigBase(ScriptRunnerStep): @@ -225,7 +225,7 @@ step_args.extend(self.BaseArgs) step_args.extend(['--install-pig']) step_args.extend(['--pig-versions', pig_versions]) - ScriptRunnerStep.__init__(self, self.InstallPigName, step_args=step_args) + super(InstallPigStep, self).__init__(self.InstallPigName, step_args=step_args) class PigStep(PigBase): @@ -239,7 +239,7 @@ step_args.extend(['--pig-versions', pig_versions]) step_args.extend(['--run-pig-script', '--args', '-f', pig_file]) step_args.extend(pig_args) - ScriptRunnerStep.__init__(self, name, step_args=step_args) + super(PigStep, self).__init__(name, step_args=step_args) class HiveBase(ScriptRunnerStep): @@ -261,7 +261,7 @@ step_args.extend(['--hive-versions', hive_versions]) if hive_site is not None: step_args.extend(['--hive-site=%s' % hive_site]) - ScriptRunnerStep.__init__(self, self.InstallHiveName, + super(InstallHiveStep, self).__init__(self.InstallHiveName, step_args=step_args) @@ -278,4 
+278,4 @@ step_args.extend(['--run-hive-script', '--args', '-f', hive_file]) if hive_args is not None: step_args.extend(hive_args) - ScriptRunnerStep.__init__(self, name, step_args=step_args) + super(HiveStep, self).__init__(name, step_args=step_args) diff -Nru python-boto-2.20.1/boto/endpoints.json python-boto-2.29.1/boto/endpoints.json --- python-boto-2.20.1/boto/endpoints.json 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/endpoints.json 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,311 @@ +{ + "autoscaling": { + "ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "autoscaling.ap-southeast-2.amazonaws.com", + "cn-north-1": "autoscaling.cn-north-1.amazonaws.com.cn", + "eu-west-1": "autoscaling.eu-west-1.amazonaws.com", + "sa-east-1": "autoscaling.sa-east-1.amazonaws.com", + "us-east-1": "autoscaling.us-east-1.amazonaws.com", + "us-gov-west-1": "autoscaling.us-gov-west-1.amazonaws.com", + "us-west-1": "autoscaling.us-west-1.amazonaws.com", + "us-west-2": "autoscaling.us-west-2.amazonaws.com" + }, + "cloudformation": { + "ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudformation.ap-southeast-2.amazonaws.com", + "cn-north-1": "cloudformation.cn-north-1.amazonaws.com.cn", + "eu-west-1": "cloudformation.eu-west-1.amazonaws.com", + "sa-east-1": "cloudformation.sa-east-1.amazonaws.com", + "us-east-1": "cloudformation.us-east-1.amazonaws.com", + "us-gov-west-1": "cloudformation.us-gov-west-1.amazonaws.com", + "us-west-1": "cloudformation.us-west-1.amazonaws.com", + "us-west-2": "cloudformation.us-west-2.amazonaws.com" + }, + "cloudfront": { + "ap-northeast-1": "cloudfront.amazonaws.com", + "ap-southeast-1": "cloudfront.amazonaws.com", + "ap-southeast-2": "cloudfront.amazonaws.com", + "eu-west-1": "cloudfront.amazonaws.com", + "sa-east-1": 
"cloudfront.amazonaws.com", + "us-east-1": "cloudfront.amazonaws.com", + "us-west-1": "cloudfront.amazonaws.com", + "us-west-2": "cloudfront.amazonaws.com" + }, + "cloudsearch": { + "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", + "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com", + "us-east-1": "cloudsearch.us-east-1.amazonaws.com", + "us-west-1": "cloudsearch.us-west-1.amazonaws.com", + "us-west-2": "cloudsearch.us-west-2.amazonaws.com" + }, + "cloudtrail": { + "ap-southeast-2": "cloudtrail.ap-southeast-2.amazonaws.com", + "eu-west-1": "cloudtrail.eu-west-1.amazonaws.com", + "us-east-1": "cloudtrail.us-east-1.amazonaws.com", + "us-west-1": "cloudtrail.us-west-1.amazonaws.com", + "us-west-2": "cloudtrail.us-west-2.amazonaws.com" + }, + "cloudwatch": { + "ap-northeast-1": "monitoring.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "monitoring.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "monitoring.ap-southeast-2.amazonaws.com", + "cn-north-1": "monitoring.cn-north-1.amazonaws.com.cn", + "eu-west-1": "monitoring.eu-west-1.amazonaws.com", + "sa-east-1": "monitoring.sa-east-1.amazonaws.com", + "us-east-1": "monitoring.us-east-1.amazonaws.com", + "us-gov-west-1": "monitoring.us-gov-west-1.amazonaws.com", + "us-west-1": "monitoring.us-west-1.amazonaws.com", + "us-west-2": "monitoring.us-west-2.amazonaws.com" + }, + "datapipeline": { + "us-east-1": "datapipeline.us-east-1.amazonaws.com", + "us-west-2": "datapipeline.us-west-2.amazonaws.com", + "eu-west-1": "datapipeline.eu-west-1.amazonaws.com", + "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com" + }, + "directconnect": { + "ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "directconnect.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "directconnect.ap-southeast-2.amazonaws.com", + "eu-west-1": "directconnect.eu-west-1.amazonaws.com", + "sa-east-1": 
"directconnect.sa-east-1.amazonaws.com", + "us-east-1": "directconnect.us-east-1.amazonaws.com", + "us-west-1": "directconnect.us-west-1.amazonaws.com", + "us-west-2": "directconnect.us-west-2.amazonaws.com" + }, + "dynamodb": { + "ap-northeast-1": "dynamodb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "dynamodb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "dynamodb.ap-southeast-2.amazonaws.com", + "cn-north-1": "dynamodb.cn-north-1.amazonaws.com.cn", + "eu-west-1": "dynamodb.eu-west-1.amazonaws.com", + "sa-east-1": "dynamodb.sa-east-1.amazonaws.com", + "us-east-1": "dynamodb.us-east-1.amazonaws.com", + "us-gov-west-1": "dynamodb.us-gov-west-1.amazonaws.com", + "us-west-1": "dynamodb.us-west-1.amazonaws.com", + "us-west-2": "dynamodb.us-west-2.amazonaws.com" + }, + "ec2": { + "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com", + "cn-north-1": "ec2.cn-north-1.amazonaws.com.cn", + "eu-west-1": "ec2.eu-west-1.amazonaws.com", + "sa-east-1": "ec2.sa-east-1.amazonaws.com", + "us-east-1": "ec2.us-east-1.amazonaws.com", + "us-gov-west-1": "ec2.us-gov-west-1.amazonaws.com", + "us-west-1": "ec2.us-west-1.amazonaws.com", + "us-west-2": "ec2.us-west-2.amazonaws.com" + }, + "elasticache": { + "ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticache.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticache.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticache.eu-west-1.amazonaws.com", + "sa-east-1": "elasticache.sa-east-1.amazonaws.com", + "us-east-1": "elasticache.us-east-1.amazonaws.com", + "us-west-1": "elasticache.us-west-1.amazonaws.com", + "us-west-2": "elasticache.us-west-2.amazonaws.com" + }, + "elasticbeanstalk": { + "ap-northeast-1": "elasticbeanstalk.ap-northeast-1.amazonaws.com", + "ap-southeast-1": 
"elasticbeanstalk.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticbeanstalk.ap-southeast-2.amazonaws.com", + "eu-west-1": "elasticbeanstalk.eu-west-1.amazonaws.com", + "sa-east-1": "elasticbeanstalk.sa-east-1.amazonaws.com", + "us-east-1": "elasticbeanstalk.us-east-1.amazonaws.com", + "us-west-1": "elasticbeanstalk.us-west-1.amazonaws.com", + "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com" + }, + "elasticloadbalancing": { + "ap-northeast-1": "elasticloadbalancing.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticloadbalancing.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticloadbalancing.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticloadbalancing.eu-west-1.amazonaws.com", + "sa-east-1": "elasticloadbalancing.sa-east-1.amazonaws.com", + "us-east-1": "elasticloadbalancing.us-east-1.amazonaws.com", + "us-gov-west-1": "elasticloadbalancing.us-gov-west-1.amazonaws.com", + "us-west-1": "elasticloadbalancing.us-west-1.amazonaws.com", + "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com" + }, + "elasticmapreduce": { + "ap-northeast-1": "ap-northeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.elasticmapreduce.amazonaws.com", + "cn-north-1": "elasticmapreduce.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticmapreduce.eu-west-1.amazonaws.com", + "sa-east-1": "sa-east-1.elasticmapreduce.amazonaws.com", + "us-east-1": "elasticmapreduce.us-east-1.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com", + "us-west-1": "us-west-1.elasticmapreduce.amazonaws.com", + "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com" + }, + "elastictranscoder": { + "ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elastictranscoder.ap-southeast-1.amazonaws.com", + "eu-west-1": 
"elastictranscoder.eu-west-1.amazonaws.com", + "us-east-1": "elastictranscoder.us-east-1.amazonaws.com", + "us-west-1": "elastictranscoder.us-west-1.amazonaws.com", + "us-west-2": "elastictranscoder.us-west-2.amazonaws.com" + }, + "glacier": { + "ap-northeast-1": "glacier.ap-northeast-1.amazonaws.com", + "ap-southeast-2": "glacier.ap-southeast-2.amazonaws.com", + "cn-north-1": "glacier.cn-north-1.amazonaws.com.cn", + "eu-west-1": "glacier.eu-west-1.amazonaws.com", + "us-east-1": "glacier.us-east-1.amazonaws.com", + "us-west-1": "glacier.us-west-1.amazonaws.com", + "us-west-2": "glacier.us-west-2.amazonaws.com" + }, + "iam": { + "ap-northeast-1": "iam.amazonaws.com", + "ap-southeast-1": "iam.amazonaws.com", + "ap-southeast-2": "iam.amazonaws.com", + "cn-north-1": "iam.cn-north-1.amazonaws.com.cn", + "eu-west-1": "iam.amazonaws.com", + "sa-east-1": "iam.amazonaws.com", + "us-east-1": "iam.amazonaws.com", + "us-gov-west-1": "iam.us-gov.amazonaws.com", + "us-west-1": "iam.amazonaws.com", + "us-west-2": "iam.amazonaws.com" + }, + "importexport": { + "ap-northeast-1": "importexport.amazonaws.com", + "ap-southeast-1": "importexport.amazonaws.com", + "ap-southeast-2": "importexport.amazonaws.com", + "eu-west-1": "importexport.amazonaws.com", + "sa-east-1": "importexport.amazonaws.com", + "us-east-1": "importexport.amazonaws.com", + "us-west-1": "importexport.amazonaws.com", + "us-west-2": "importexport.amazonaws.com" + }, + "kinesis": { + "us-east-1": "kinesis.us-east-1.amazonaws.com" + }, + "opsworks": { + "us-east-1": "opsworks.us-east-1.amazonaws.com" + }, + "rds": { + "ap-northeast-1": "rds.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "rds.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "rds.ap-southeast-2.amazonaws.com", + "cn-north-1": "rds.cn-north-1.amazonaws.com.cn", + "eu-west-1": "rds.eu-west-1.amazonaws.com", + "sa-east-1": "rds.sa-east-1.amazonaws.com", + "us-east-1": "rds.amazonaws.com", + "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", + 
"us-west-1": "rds.us-west-1.amazonaws.com", + "us-west-2": "rds.us-west-2.amazonaws.com" + }, + "redshift": { + "ap-northeast-1": "redshift.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "redshift.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "redshift.ap-southeast-2.amazonaws.com", + "eu-west-1": "redshift.eu-west-1.amazonaws.com", + "us-east-1": "redshift.us-east-1.amazonaws.com", + "us-west-2": "redshift.us-west-2.amazonaws.com" + }, + "route53": { + "ap-northeast-1": "route53.amazonaws.com", + "ap-southeast-1": "route53.amazonaws.com", + "ap-southeast-2": "route53.amazonaws.com", + "eu-west-1": "route53.amazonaws.com", + "sa-east-1": "route53.amazonaws.com", + "us-east-1": "route53.amazonaws.com", + "us-west-1": "route53.amazonaws.com", + "us-west-2": "route53.amazonaws.com" + }, + "s3": { + "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", + "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", + "eu-west-1": "s3-eu-west-1.amazonaws.com", + "sa-east-1": "s3-sa-east-1.amazonaws.com", + "us-east-1": "s3.amazonaws.com", + "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com", + "us-west-1": "s3-us-west-1.amazonaws.com", + "us-west-2": "s3-us-west-2.amazonaws.com" + }, + "sdb": { + "ap-northeast-1": "sdb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sdb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "sdb.ap-southeast-2.amazonaws.com", + "eu-west-1": "sdb.eu-west-1.amazonaws.com", + "sa-east-1": "sdb.sa-east-1.amazonaws.com", + "us-east-1": "sdb.amazonaws.com", + "us-west-1": "sdb.us-west-1.amazonaws.com", + "us-west-2": "sdb.us-west-2.amazonaws.com" + }, + "ses": { + "eu-west-1": "email.eu-west-1.amazonaws.com", + "us-east-1": "email.us-east-1.amazonaws.com", + "us-west-2": "email.us-west-2.amazonaws.com" + }, + "sns": { + "ap-northeast-1": "sns.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sns.ap-southeast-1.amazonaws.com", + 
"ap-southeast-2": "sns.ap-southeast-2.amazonaws.com", + "cn-north-1": "sns.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sns.eu-west-1.amazonaws.com", + "sa-east-1": "sns.sa-east-1.amazonaws.com", + "us-east-1": "sns.us-east-1.amazonaws.com", + "us-gov-west-1": "sns.us-gov-west-1.amazonaws.com", + "us-west-1": "sns.us-west-1.amazonaws.com", + "us-west-2": "sns.us-west-2.amazonaws.com" + }, + "sqs": { + "ap-northeast-1": "ap-northeast-1.queue.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.queue.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.queue.amazonaws.com", + "cn-north-1": "sqs.cn-north-1.amazonaws.com.cn", + "eu-west-1": "eu-west-1.queue.amazonaws.com", + "sa-east-1": "sa-east-1.queue.amazonaws.com", + "us-east-1": "queue.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.queue.amazonaws.com", + "us-west-1": "us-west-1.queue.amazonaws.com", + "us-west-2": "us-west-2.queue.amazonaws.com" + }, + "storagegateway": { + "ap-northeast-1": "storagegateway.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "storagegateway.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "storagegateway.ap-southeast-2.amazonaws.com", + "eu-west-1": "storagegateway.eu-west-1.amazonaws.com", + "sa-east-1": "storagegateway.sa-east-1.amazonaws.com", + "us-east-1": "storagegateway.us-east-1.amazonaws.com", + "us-west-1": "storagegateway.us-west-1.amazonaws.com", + "us-west-2": "storagegateway.us-west-2.amazonaws.com" + }, + "sts": { + "ap-northeast-1": "sts.amazonaws.com", + "ap-southeast-1": "sts.amazonaws.com", + "ap-southeast-2": "sts.amazonaws.com", + "cn-north-1": "sts.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sts.amazonaws.com", + "sa-east-1": "sts.amazonaws.com", + "us-east-1": "sts.amazonaws.com", + "us-gov-west-1": "sts.us-gov-west-1.amazonaws.com", + "us-west-1": "sts.amazonaws.com", + "us-west-2": "sts.amazonaws.com" + }, + "support": { + "us-east-1": "support.us-east-1.amazonaws.com" + }, + "swf": { + "ap-northeast-1": "swf.ap-northeast-1.amazonaws.com", + 
"ap-southeast-1": "swf.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "swf.ap-southeast-2.amazonaws.com", + "cn-north-1": "swf.cn-north-1.amazonaws.com.cn", + "eu-west-1": "swf.eu-west-1.amazonaws.com", + "sa-east-1": "swf.sa-east-1.amazonaws.com", + "us-east-1": "swf.us-east-1.amazonaws.com", + "us-gov-west-1": "swf.us-gov-west-1.amazonaws.com", + "us-west-1": "swf.us-west-1.amazonaws.com", + "us-west-2": "swf.us-west-2.amazonaws.com" + } +} diff -Nru python-boto-2.20.1/boto/exception.py python-boto-2.29.1/boto/exception.py --- python-boto-2.20.1/boto/exception.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/exception.py 2014-05-30 20:49:34.000000000 +0000 @@ -27,6 +27,7 @@ import base64 import xml.sax from boto import handler +from boto.compat import json from boto.resultset import ResultSet @@ -34,9 +35,8 @@ """ General Boto Client error (error accessing AWS) """ - def __init__(self, reason, *args): - StandardError.__init__(self, reason, *args) + super(BotoClientError, self).__init__(reason, *args) self.reason = reason def __repr__(self): @@ -45,32 +45,35 @@ def __str__(self): return 'BotoClientError: %s' % self.reason -class SDBPersistenceError(StandardError): +class SDBPersistenceError(StandardError): pass + class StoragePermissionsError(BotoClientError): """ Permissions error when accessing a bucket or key on a storage service. """ pass + class S3PermissionsError(StoragePermissionsError): """ Permissions error when accessing a bucket or key on S3. """ pass + class GSPermissionsError(StoragePermissionsError): """ Permissions error when accessing a bucket or key on GS. 
""" pass -class BotoServerError(StandardError): +class BotoServerError(StandardError): def __init__(self, status, reason, body=None, *args): - StandardError.__init__(self, status, reason, body, *args) + super(BotoServerError, self).__init__(status, reason, body, *args) self.status = status self.reason = reason self.body = body or '' @@ -82,16 +85,44 @@ # Attempt to parse the error response. If body isn't present, # then just ignore the error response. if self.body: - try: - h = handler.XmlHandlerWrapper(self, self) - h.parseString(self.body) - except (TypeError, xml.sax.SAXParseException), pe: - # Remove unparsable message body so we don't include garbage - # in exception. But first, save self.body in self.error_message - # because occasionally we get error messages from Eucalyptus - # that are just text strings that we want to preserve. - self.message = self.body - self.body = None + # Check if it looks like a ``dict``. + if hasattr(self.body, 'items'): + # It's not a string, so trying to parse it will fail. + # But since it's data, we can work with that. + self.request_id = self.body.get('RequestId', None) + + if 'Error' in self.body: + # XML-style + error = self.body.get('Error', {}) + self.error_code = error.get('Code', None) + self.message = error.get('Message', None) + else: + # JSON-style. + self.message = self.body.get('message', None) + else: + try: + h = handler.XmlHandlerWrapper(self, self) + h.parseString(self.body) + except (TypeError, xml.sax.SAXParseException), pe: + # What if it's JSON? Let's try that. + try: + parsed = json.loads(self.body) + + if 'RequestId' in parsed: + self.request_id = parsed['RequestId'] + if 'Error' in parsed: + if 'Code' in parsed['Error']: + self.error_code = parsed['Error']['Code'] + if 'Message' in parsed['Error']: + self.message = parsed['Error']['Message'] + + except (TypeError, ValueError): + # Remove unparsable message body so we don't include garbage + # in exception. 
But first, save self.body in self.error_message + # because occasionally we get error messages from Eucalyptus + # that are just text strings that we want to preserve. + self.message = self.body + self.body = None def __getattr__(self, name): if name == 'error_message': @@ -134,8 +165,8 @@ self.message = None self.box_usage = None -class ConsoleOutput: +class ConsoleOutput(object): def __init__(self, parent=None): self.parent = parent self.instance_id = None @@ -154,19 +185,20 @@ else: setattr(self, name, value) + class StorageCreateError(BotoServerError): """ Error creating a bucket or key on a storage service. """ def __init__(self, status, reason, body=None): self.bucket = None - BotoServerError.__init__(self, status, reason, body) + super(StorageCreateError, self).__init__(status, reason, body) def endElement(self, name, value, connection): if name == 'BucketName': self.bucket = value else: - return BotoServerError.endElement(self, name, value, connection) + return super(StorageCreateError, self).endElement(name, value, connection) class S3CreateError(StorageCreateError): """ @@ -174,30 +206,35 @@ """ pass + class GSCreateError(StorageCreateError): """ Error creating a bucket or key on GS. """ pass + class StorageCopyError(BotoServerError): """ Error copying a key on a storage service. """ pass + class S3CopyError(StorageCopyError): """ Error copying a key on S3. """ pass + class GSCopyError(StorageCopyError): """ Error copying a key on GS. """ pass + class SQSError(BotoServerError): """ General Error on Simple Queue Service. 
@@ -205,10 +242,10 @@ def __init__(self, status, reason, body=None): self.detail = None self.type = None - BotoServerError.__init__(self, status, reason, body) + super(SQSError, self).__init__(status, reason, body) def startElement(self, name, attrs, connection): - return BotoServerError.startElement(self, name, attrs, connection) + return super(SQSError, self).startElement(name, attrs, connection) def endElement(self, name, value, connection): if name == 'Detail': @@ -216,19 +253,20 @@ elif name == 'Type': self.type = value else: - return BotoServerError.endElement(self, name, value, connection) + return super(SQSError, self).endElement(name, value, connection) def _cleanupParsedProperties(self): - BotoServerError._cleanupParsedProperties(self) + super(SQSError, self)._cleanupParsedProperties() for p in ('detail', 'type'): setattr(self, p, None) + class SQSDecodeError(BotoClientError): """ Error when decoding an SQS message. """ def __init__(self, reason, message): - BotoClientError.__init__(self, reason, message) + super(SQSDecodeError, self).__init__(reason, message) self.message = message def __repr__(self): @@ -237,49 +275,54 @@ def __str__(self): return 'SQSDecodeError: %s' % self.reason + class StorageResponseError(BotoServerError): """ Error in response from a storage service. 
""" def __init__(self, status, reason, body=None): self.resource = None - BotoServerError.__init__(self, status, reason, body) + super(StorageResponseError, self).__init__(status, reason, body) def startElement(self, name, attrs, connection): - return BotoServerError.startElement(self, name, attrs, connection) + return super(StorageResponseError, self).startElement(name, attrs, + connection) def endElement(self, name, value, connection): if name == 'Resource': self.resource = value else: - return BotoServerError.endElement(self, name, value, connection) + return super(StorageResponseError, self).endElement(name, value, + connection) def _cleanupParsedProperties(self): - BotoServerError._cleanupParsedProperties(self) + super(StorageResponseError, self)._cleanupParsedProperties() for p in ('resource'): setattr(self, p, None) + class S3ResponseError(StorageResponseError): """ Error in response from S3. """ pass + class GSResponseError(StorageResponseError): """ Error in response from GS. """ pass + class EC2ResponseError(BotoServerError): """ Error in response from EC2. 
""" - def __init__(self, status, reason, body=None): self.errors = None self._errorResultSet = [] - BotoServerError.__init__(self, status, reason, body) + super(EC2ResponseError, self).__init__(status, reason, body) self.errors = [ (e.error_code, e.error_message) \ for e in self._errorResultSet ] if len(self.errors): @@ -299,11 +342,12 @@ return None # don't call subclass here def _cleanupParsedProperties(self): - BotoServerError._cleanupParsedProperties(self) + super(EC2ResponseError, self)._cleanupParsedProperties() self._errorResultSet = [] for p in ('errors'): setattr(self, p, None) + class JSONResponseError(BotoServerError): """ This exception expects the fully parsed and decoded JSON response @@ -342,8 +386,8 @@ """ pass -class _EC2Error: +class _EC2Error(object): def __init__(self, connection=None): self.connection = connection self.error_code = None @@ -360,6 +404,7 @@ else: return None + class SDBResponseError(BotoServerError): """ Error in responses from SDB. @@ -394,21 +439,21 @@ """Exception raised when URI is invalid.""" def __init__(self, message): - Exception.__init__(self, message) + super(InvalidUriError, self).__init__(message) self.message = message class InvalidAclError(Exception): """Exception raised when ACL XML is invalid.""" def __init__(self, message): - Exception.__init__(self, message) + super(InvalidAclError, self).__init__(message) self.message = message class InvalidCorsError(Exception): """Exception raised when CORS XML is invalid.""" def __init__(self, message): - Exception.__init__(self, message) + super(InvalidCorsError, self).__init__(message) self.message = message class NoAuthHandlerFound(Exception): @@ -419,7 +464,7 @@ """Exception raised when GCS lifecycle configuration XML is invalid.""" def __init__(self, message): - Exception.__init__(self, message) + super(InvalidLifecycleConfigError, self).__init__(message) self.message = message # Enum class for resumable upload failure disposition. 
@@ -454,7 +499,7 @@ """ def __init__(self, message, disposition): - Exception.__init__(self, message, disposition) + super(ResumableUploadException, self).__init__(message, disposition) self.message = message self.disposition = disposition @@ -470,7 +515,7 @@ """ def __init__(self, message, disposition): - Exception.__init__(self, message, disposition) + super(ResumableDownloadException, self).__init__(message, disposition) self.message = message self.disposition = disposition @@ -485,7 +530,7 @@ """ def __init__(self, message): - Exception.__init__(self, message) + super(TooManyRecordsException, self).__init__(message) self.message = message diff -Nru python-boto-2.20.1/boto/fps/connection.py python-boto-2.29.1/boto/fps/connection.py --- python-boto-2.20.1/boto/fps/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/fps/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -109,7 +109,7 @@ def __init__(self, *args, **kw): self.currencycode = kw.pop('CurrencyCode', self.currencycode) kw.setdefault('host', 'fps.sandbox.amazonaws.com') - AWSQueryConnection.__init__(self, *args, **kw) + super(FPSConnection, self).__init__(*args, **kw) def _required_auth_capability(self): return ['fps'] diff -Nru python-boto-2.20.1/boto/fps/response.py python-boto-2.29.1/boto/fps/response.py --- python-boto-2.20.1/boto/fps/response.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/fps/response.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,3 +1,26 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2010 Jason R. 
Coombs http://www.jaraco.com/ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
from decimal import Decimal @@ -9,7 +32,7 @@ # due to nodes receiving their closing tags def endElement(self, name, value, connection): if name != action + 'Response': - Response.endElement(self, name, value, connection) + super(FPSResponse, self).endElement(name, value, connection) return FPSResponse @@ -48,7 +71,7 @@ elif name == self._action + 'Result': setattr(self, name, self._Result(name=name)) else: - return ResponseElement.startElement(self, name, attrs, connection) + return super(Response, self).startElement(name, attrs, connection) return getattr(self, name) @@ -66,12 +89,12 @@ if name not in ('CurrencyCode', 'Value'): message = 'Unrecognized tag {0} in ComplexAmount'.format(name) raise AssertionError(message) - return ResponseElement.startElement(self, name, attrs, connection) + return super(ComplexAmount, self).startElement(name, attrs, connection) def endElement(self, name, value, connection): if name == 'Value': value = Decimal(value) - ResponseElement.endElement(self, name, value, connection) + super(ComplexAmount, self).endElement(name, value, connection) class AmountCollection(ResponseElement): @@ -85,7 +108,7 @@ if name == 'AvailableBalances': setattr(self, name, AmountCollection(name=name)) return getattr(self, name) - return AmountCollection.startElement(self, name, attrs, connection) + return super(AccountBalance, self).startElement(name, attrs, connection) class GetAccountBalanceResult(ResponseElement): @@ -93,7 +116,8 @@ if name == 'AccountBalance': setattr(self, name, AccountBalance(name=name)) return getattr(self, name) - return Response.startElement(self, name, attrs, connection) + return super(GetAccountBalanceResult, self).startElement(name, attrs, + connection) class GetTotalPrepaidLiabilityResult(ResponseElement): @@ -101,7 +125,8 @@ if name == 'OutstandingPrepaidLiability': setattr(self, name, AmountCollection(name=name)) return getattr(self, name) - return Response.startElement(self, name, attrs, connection) + return 
super(GetTotalPrepaidLiabilityResult, self).startElement(name, + attrs, connection) class GetPrepaidBalanceResult(ResponseElement): @@ -109,7 +134,8 @@ if name == 'PrepaidBalance': setattr(self, name, AmountCollection(name=name)) return getattr(self, name) - return Response.startElement(self, name, attrs, connection) + return super(GetPrepaidBalanceResult, self).startElement(name, attrs, + connection) class GetOutstandingDebtBalanceResult(ResponseElement): @@ -117,7 +143,8 @@ if name == 'OutstandingDebt': setattr(self, name, AmountCollection(name=name)) return getattr(self, name) - return Response.startElement(self, name, attrs, connection) + return super(GetOutstandingDebtBalanceResult, self).startElement(name, + attrs, connection) class TransactionPart(ResponseElement): @@ -125,13 +152,14 @@ if name == 'FeesPaid': setattr(self, name, ComplexAmount(name=name)) return getattr(self, name) - return ResponseElement.startElement(self, name, attrs, connection) + return super(TransactionPart, self).startElement(name, attrs, + connection) class Transaction(ResponseElement): def __init__(self, *args, **kw): self.TransactionPart = [] - ResponseElement.__init__(self, *args, **kw) + super(Transaction, self).__init__(*args, **kw) def startElement(self, name, attrs, connection): if name == 'TransactionPart': @@ -140,19 +168,20 @@ if name in ('TransactionAmount', 'FPSFees', 'Balance'): setattr(self, name, ComplexAmount(name=name)) return getattr(self, name) - return ResponseElement.startElement(self, name, attrs, connection) + return super(Transaction, self).startElement(name, attrs, connection) class GetAccountActivityResult(ResponseElement): def __init__(self, *args, **kw): self.Transaction = [] - ResponseElement.__init__(self, *args, **kw) + super(GetAccountActivityResult, self).__init__(*args, **kw) def startElement(self, name, attrs, connection): if name == 'Transaction': getattr(self, name).append(Transaction(name=name)) return getattr(self, name)[-1] - return 
ResponseElement.startElement(self, name, attrs, connection) + return super(GetAccountActivityResult, self).startElement(name, attrs, + connection) class GetTransactionResult(ResponseElement): @@ -160,16 +189,18 @@ if name == 'Transaction': setattr(self, name, Transaction(name=name)) return getattr(self, name) - return ResponseElement.startElement(self, name, attrs, connection) + return super(GetTransactionResult, self).startElement(name, attrs, + connection) class GetTokensResult(ResponseElement): def __init__(self, *args, **kw): self.Token = [] - ResponseElement.__init__(self, *args, **kw) + super(GetTokensResult, self).__init__(*args, **kw) def startElement(self, name, attrs, connection): if name == 'Token': getattr(self, name).append(ResponseElement(name=name)) return getattr(self, name)[-1] - return ResponseElement.startElement(self, name, attrs, connection) + return super(GetTokensResult, self).startElement(name, attrs, + connection) diff -Nru python-boto-2.20.1/boto/glacier/concurrent.py python-boto-2.29.1/boto/glacier/concurrent.py --- python-boto-2.20.1/boto/glacier/concurrent.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/glacier/concurrent.py 2014-05-30 20:49:34.000000000 +0000 @@ -19,6 +19,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # +from __future__ import with_statement + import os import math import threading diff -Nru python-boto-2.20.1/boto/glacier/__init__.py python-boto-2.29.1/boto/glacier/__init__.py --- python-boto-2.20.1/boto/glacier/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/glacier/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
# -from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,25 +32,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.glacier.layer2 import Layer2 - return [RegionInfo(name='us-east-1', - endpoint='glacier.us-east-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='us-west-1', - endpoint='glacier.us-west-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='us-west-2', - endpoint='glacier.us-west-2.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='ap-northeast-1', - endpoint='glacier.ap-northeast-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='eu-west-1', - endpoint='glacier.eu-west-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='ap-southeast-2', - endpoint='glacier.ap-southeast-2.amazonaws.com', - connection_cls=Layer2), - ] + return get_regions('glacier', connection_cls=Layer2) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/glacier/job.py python-boto-2.29.1/boto/glacier/job.py --- python-boto-2.20.1/boto/glacier/job.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/glacier/job.py 2014-05-30 20:49:34.000000000 +0000 @@ -97,9 +97,12 @@ actual_tree_hash, response['TreeHash'], byte_range)) return response + def _calc_num_chunks(self, chunk_size): + return int(math.ceil(self.archive_size / float(chunk_size))) + def download_to_file(self, filename, chunk_size=DefaultPartSize, verify_hashes=True, retry_exceptions=(socket.error,)): - """Download an archive to a file. + """Download an archive to a file by name. :type filename: str :param filename: The name of the file where the archive @@ -114,11 +117,33 @@ the tree hashes for each downloaded chunk. 
""" - num_chunks = int(math.ceil(self.archive_size / float(chunk_size))) + num_chunks = self._calc_num_chunks(chunk_size) with open(filename, 'wb') as output_file: self._download_to_fileob(output_file, num_chunks, chunk_size, verify_hashes, retry_exceptions) + def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize, + verify_hashes=True, + retry_exceptions=(socket.error,)): + """Download an archive to a file object. + + :type output_file: file + :param output_file: The file object where the archive + contents will be saved. + + :type chunk_size: int + :param chunk_size: The chunk size to use when downloading + the archive. + + :type verify_hashes: bool + :param verify_hashes: Indicates whether or not to verify + the tree hashes for each downloaded chunk. + + """ + num_chunks = self._calc_num_chunks(chunk_size) + self._download_to_fileob(output_file, num_chunks, chunk_size, + verify_hashes, retry_exceptions) + def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes, retry_exceptions): for i in xrange(num_chunks): diff -Nru python-boto-2.20.1/boto/glacier/layer1.py python-boto-2.29.1/boto/glacier/layer1.py --- python-boto-2.20.1/boto/glacier/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/glacier/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -33,9 +33,42 @@ class Layer1(AWSAuthConnection): + """ + Amazon Glacier is a storage solution for "cold data." + Amazon Glacier is an extremely low-cost storage service that + provides secure, durable and easy-to-use storage for data backup + and archival. With Amazon Glacier, customers can store their data + cost effectively for months, years, or decades. Amazon Glacier + also enables customers to offload the administrative burdens of + operating and scaling storage to AWS, so they don't have to worry + about capacity planning, hardware provisioning, data replication, + hardware failure and recovery, or time-consuming hardware + migrations. 
+ + Amazon Glacier is a great storage choice when low storage cost is + paramount, your data is rarely retrieved, and retrieval latency of + several hours is acceptable. If your application requires fast or + frequent access to your data, consider using Amazon S3. For more + information, go to `Amazon Simple Storage Service (Amazon S3)`_. + + You can store any kind of data in any format. There is no maximum + limit on the total amount of data you can store in Amazon Glacier. + + If you are a first-time user of Amazon Glacier, we recommend that + you begin by reading the following sections in the Amazon Glacier + Developer Guide : + + + + `What is Amazon Glacier`_ - This section of the Developer Guide + describes the underlying data model, the operations it supports, + and the AWS SDKs that you can use to interact with the service. + + `Getting Started with Amazon Glacier`_ - The Getting Started + section walks you through the process of creating a vault, + uploading archives, creating jobs to download archives, retrieving + the job output, and deleting archives. 
+ """ Version = '2012-06-01' - """Glacier API version.""" def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, account_id='-', is_secure=True, port=None, @@ -44,7 +77,8 @@ https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, - region=None, region_name='us-east-1'): + region=None, region_name='us-east-1', + profile_name=None): if not region: for reg in boto.glacier.regions(): @@ -54,13 +88,13 @@ self.region = region self.account_id = account_id - AWSAuthConnection.__init__(self, region.endpoint, + super(Layer1, self).__init__(region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug, https_connection_factory, path, provider, security_token, - suppress_consec_slashes) + suppress_consec_slashes, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -72,7 +106,7 @@ headers = {} headers['x-amz-glacier-version'] = self.Version uri = '/%s/%s' % (self.account_id, resource) - response = AWSAuthConnection.make_request(self, verb, uri, + response = super(Layer1, self).make_request(verb, uri, params=params, headers=headers, sender=sender, @@ -87,35 +121,39 @@ def list_vaults(self, limit=None, marker=None): """ - This operation lists all vaults owned by the calling user’s + This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name. By default, this operation returns up to 1,000 items. If there - are more vaults to list, the marker field in the response body - contains the vault Amazon Resource Name (ARN) at which to - continue the list with a new List Vaults request; otherwise, - the marker field is null. In your next List Vaults request you - set the marker parameter to the value Amazon Glacier returned - in the responses to your previous List Vaults request. 
You can - also limit the number of vaults returned in the response by - specifying the limit parameter in the request. - - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the List Vaults - operation returns up to 1,000 items. - - :type marker: str - :param marker: A string used for pagination. marker specifies - the vault ARN after which the listing of vaults should - begin. (The vault specified by marker is not included in - the returned list.) Get the marker value from a previous - List Vaults response. You need to include the marker only - if you are continuing the pagination of results started in - a previous List Vaults request. Specifying an empty value - ("") for the marker returns a list of vaults starting - from the first vault. + are more vaults to list, the response `marker` field contains + the vault Amazon Resource Name (ARN) at which to continue the + list with a new List Vaults request; otherwise, the `marker` + field is `null`. To return a list of vaults that begins at a + specific vault, set the `marker` request parameter to the + vault ARN you obtained from a previous List Vaults request. + You can also limit the number of vaults returned in the + response by specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `List + Vaults `_ in the Amazon Glacier Developer Guide . + + :type marker: string + :param marker: A string used for pagination. The marker specifies the + vault ARN after which the listing of vaults should begin. 
+ + :type limit: string + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the List Vaults operation returns up to + 1,000 items. """ params = {} if limit: @@ -127,18 +165,31 @@ def describe_vault(self, vault_name): """ This operation returns information about a vault, including - the vault Amazon Resource Name (ARN), the date the vault was - created, the number of archives contained within the vault, - and the total size of all the archives in the vault. The - number of archives and their total size are as of the last - vault inventory Amazon Glacier generated. Amazon Glacier - generates vault inventories approximately daily. This means - that if you add or remove an archive from a vault, and then - immediately send a Describe Vault request, the response might - not reflect the changes. + the vault's Amazon Resource Name (ARN), the date the vault was + created, the number of archives it contains, and the total + size of all the archives in the vault. The number of archives + and their total size are as of the last inventory generation. + This means that if you add or remove an archive from a vault, + and then immediately use Describe Vault, the change in + contents will not be immediately reflected. If you want to + retrieve the latest inventory of the vault, use InitiateJob. + Amazon Glacier generates vault inventories approximately + daily. For more information, see `Downloading a Vault + Inventory in Amazon Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. 
+ + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe + Vault `_ in the Amazon Glacier Developer Guide . - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('GET', uri) @@ -147,23 +198,34 @@ """ This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an - AWS account. You can create up to 1,000 vaults per - account. For information on creating more vaults, go to the - Amazon Glacier product detail page. + AWS account. You can create up to 1,000 vaults per account. If + you need to create more vaults, contact Amazon Glacier. You must use the following guidelines when naming a vault. - Names can be between 1 and 255 characters long. - Allowed characters are a–z, A–Z, 0–9, '_' (underscore), - '-' (hyphen), and '.' (period). - This operation is idempotent, you can send the same request - multiple times and it has no further effect after the first - time Amazon Glacier creates the specified vault. + + Names can be between 1 and 255 characters long. + + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' + (hyphen), and '.' (period). - :type vault_name: str - :param vault_name: The name of the new vault + + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in + the Amazon Glacier Developer Guide . 
+ + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('PUT', uri, ok_responses=(201,), @@ -172,50 +234,114 @@ def delete_vault(self, vault_name): """ This operation deletes a vault. Amazon Glacier will delete a - vault only if there are no archives in the vault as per the + vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not - removed) and Amazon Glacier returns an error. - - This operation is idempotent, you can send the same request - multiple times and it has no further effect after the first - time Amazon Glacier delete the specified vault. + removed) and Amazon Glacier returns an error. You can use + DescribeVault to return the number of archives in a vault, and + you can use `Initiate a Job (POST jobs)`_ to initiate a new + inventory retrieval for a vault. The inventory contains the + archive IDs you use to delete archives using `Delete Archive + (DELETE archive)`_. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in + the Amazon Glacier Developer Guide . - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. 
""" uri = 'vaults/%s' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) def get_vault_notifications(self, vault_name): """ - This operation retrieves the notification-configuration - subresource set on the vault. + This operation retrieves the `notification-configuration` + subresource of the specified vault. - :type vault_name: str - :param vault_name: The name of the new vault + For information about setting a notification configuration on + a vault, see SetVaultNotifications. If a notification + configuration for a vault is not set, the operation returns a + `404 Not Found` error. For more information about vault + notifications, see `Configuring Vault Notifications in Amazon + Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Get + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('GET', uri) def set_vault_notifications(self, vault_name, notification_config): """ - This operation retrieves the notification-configuration - subresource set on the vault. + This operation configures notifications that will be sent when + specific events happen to a vault. By default, you don't get + any notifications. + + To configure vault notifications, send a PUT request to the + `notification-configuration` subresource of the vault. 
The + request should include a JSON document that provides an Amazon + SNS topic and specific events for which you want Amazon + Glacier to send notifications to the topic. + + Amazon SNS topics must grant permission to the vault to be + allowed to publish notifications to the topic. You can + configure a vault to publish a notification for the following + vault events: + + + + **ArchiveRetrievalCompleted** This event occurs when a job + that was initiated for an archive retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + **InventoryRetrievalCompleted** This event occurs when a job + that was initiated for an inventory retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Set + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. - :type notification_config: dict - :param notification_config: A Python dictionary containing - an SNS Topic and events for which you want Amazon Glacier - to send notifications to the topic. Possible events are: - - * ArchiveRetrievalCompleted - occurs when a job that was - initiated for an archive retrieval is completed. 
- * InventoryRetrievalCompleted - occurs when a job that was - initiated for an inventory retrieval is completed. + :type vault_notification_config: dict + :param vault_notification_config: Provides options for specifying + notification configuration. The format of the dictionary is: @@ -229,11 +355,27 @@ def delete_vault_notifications(self, vault_name): """ - This operation deletes the notification-configuration - subresource set on the vault. + This operation deletes the notification configuration set for + a vault. The operation is eventually consistent;that is, it + might take some time for Amazon Glacier to completely disable + the notifications and you might still receive some + notifications for a short time after you send the delete + request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and + `Delete Vault Notification Configuration `_ in the Amazon + Glacier Developer Guide. - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) @@ -243,36 +385,80 @@ def list_jobs(self, vault_name, completed=None, status_code=None, limit=None, marker=None): """ - This operation lists jobs for a vault including jobs that are + This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. 
- :type vault_name: str + + Amazon Glacier retains recently completed jobs for a period + before deleting them; however, it eventually removes completed + jobs. The output of completed jobs can be retrieved. Retaining + completed jobs for a period of time after they have completed + enables you to get a job output in the event you miss the job + completion notification or your first attempt to download it + fails. For example, suppose you start an archive retrieval job + to download an archive. After the job completes, you start to + download the archive but encounter a network error. In this + scenario, you can retry and download the archive while the job + exists. + + + To retrieve an archive or retrieve a vault inventory from + Amazon Glacier, you first initiate a job, and after the job + completes, you download the data. For an archive retrieval, + the output is the archive data, and for an inventory + retrieval, it is the inventory list. The List Job operation + returns a list of these jobs sorted by job initiation time. + + This List Jobs operation supports pagination. By default, this + operation returns up to 1,000 jobs in the response. You should + always check the response for a `marker` at which to continue + the list; if there are no more items the `marker` is `null`. + To return a list of jobs that begins at a specific job, set + the `marker` request parameter to the value you obtained from + a previous List Jobs request. You can also limit the number of + jobs returned in the response by specifying the `limit` + parameter in the request. + + Additionally, you can filter the jobs list returned by + specifying an optional `statuscode` (InProgress, Succeeded, or + Failed) and `completed` (true, false) parameter. The + `statuscode` allows you to specify that only jobs that match a + specified status are returned. The `completed` parameter + allows you to specify that only jobs in a specific completion + state are returned. 
+ + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For the underlying REST API, go to `List Jobs `_ + + :type vault_name: string :param vault_name: The name of the vault. - :type completed: boolean - :param completed: Specifies the state of the jobs to return. - If a value of True is passed, only completed jobs will - be returned. If a value of False is passed, only - uncompleted jobs will be returned. If no value is - passed, all jobs will be returned. - - :type status_code: string - :param status_code: Specifies the type of job status to return. - Valid values are: InProgress|Succeeded|Failed. If not - specified, jobs with all status codes are returned. - - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the List Jobs - operation returns up to 1,000 items. - - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the job at which the listing of jobs should - begin. Get the marker value from a previous List Jobs - response. You need only include the marker if you are - continuing the pagination of results started in a previous - List Jobs request. + :type limit: string + :param limit: Specifies that the response be limited to the specified + number of items or fewer. If not specified, the List Jobs operation + returns up to 1,000 jobs. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the job at which the listing of jobs should begin. Get + the marker value from a previous List Jobs response. You need only + include the marker if you are continuing the pagination of results + started in a previous List Jobs request. 
+ + :type statuscode: string + :param statuscode: Specifies the type of job status to return. You can + specify the following values: "InProgress", "Succeeded", or + "Failed". + + :type completed: string + :param completed: Specifies the state of the jobs to return. You can + specify `True` or `False`. """ params = {} @@ -292,39 +478,154 @@ This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon - Simple Notification Service (Amazon SNS) topic to notify after - Amazon Glacier completes the job. + SNS topic to notify after Amazon Glacier completes the job. + For more information about initiating a job, see InitiateJob. + + + This operation enables you to check the status of your job. + However, it is strongly recommended that you set up an Amazon + SNS topic and specify it in your initiate job request so that + Amazon Glacier can notify the topic after it completes the + job. - :type vault_name: str - :param vault_name: The name of the new vault - :type job_id: str - :param job_id: The ID of the job. + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For information about the underlying REST API, go to `Working + with Archives in Amazon Glacier`_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The ID of the job to describe. 
""" uri = 'vaults/%s/jobs/%s' % (vault_name, job_id) return self.make_request('GET', uri, ok_responses=(200,)) def initiate_job(self, vault_name, job_data): """ - This operation initiates a job of the specified - type. Retrieving an archive or a vault inventory are - asynchronous operations that require you to initiate a job. It - is a two-step process: - - * Initiate a retrieval job. - * After the job completes, download the bytes. - - The retrieval is executed asynchronously. When you initiate - a retrieval job, Amazon Glacier creates a job and returns a - job ID in the response. + This operation initiates a job of the specified type. In this + release, you can initiate a job to retrieve either an archive + or a vault inventory (a list of archives in a vault). + + Retrieving data from Amazon Glacier is a two-step process: + + + #. Initiate a retrieval job. + #. After the job completes, download the bytes. + + + The retrieval request is executed asynchronously. When you + initiate a retrieval job, Amazon Glacier creates a job and + returns a job ID in the response. When Amazon Glacier + completes the job, you can get the job output (archive or + inventory data). For information about getting job output, see + GetJobOutput operation. + + The job must complete before you can get its output. To + determine when a job is complete, you have the following + options: + + + + **Use Amazon SNS Notification** You can specify an Amazon + Simple Notification Service (Amazon SNS) topic to which Amazon + Glacier can post a notification after the job is completed. + You can specify an SNS topic per job request. The notification + is sent only after Amazon Glacier completes the job. In + addition to specifying an SNS topic per job request, you can + configure vault notifications for a vault so that job + notifications are always sent. For more information, see + SetVaultNotifications. 
+ + **Get job details** You can make a DescribeJob request to + obtain job status information while a job is in progress. + However, it is more efficient to use an Amazon SNS + notification to determine when a job is complete. + + + + The information you get via notification is same that you get + by calling DescribeJob. + + + If for a specific event, you add both the notification + configuration on the vault and also specify an SNS topic in + your initiate job request, Amazon Glacier sends both + notifications. For more information, see + SetVaultNotifications. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + **About the Vault Inventory** + + Amazon Glacier prepares an inventory for each vault + periodically, every 24 hours. When you initiate a job for a + vault inventory, Amazon Glacier returns the last inventory for + the vault. The inventory data you get might be up to a day or + two days old. Also, the initiate inventory job might take some + time to complete before you can download the vault inventory. + So you do not want to retrieve a vault inventory for each + vault operation. However, in some scenarios, you might find + the vault inventory useful. For example, when you upload an + archive, you can provide an archive description but not an + archive name. Amazon Glacier provides you a unique archive ID, + an opaque string of characters. So, you might maintain your + own database that maps archive names to their corresponding + Amazon Glacier assigned archive IDs. You might find the vault + inventory useful in the event you need to reconcile + information in your database with the actual vault inventory. 
+ + **About Ranged Archive Retrieval** + + You can initiate an archive retrieval for the whole archive or + a range of the archive. In the case of ranged archive + retrieval, you specify a byte range to return or the whole + archive. The range specified must be megabyte (MB) aligned, + that is the range start value must be divisible by 1 MB and + range end value plus 1 must be divisible by 1 MB or equal the + end of the archive. If the ranged archive retrieval is not + megabyte aligned, this operation returns a 400 response. + Furthermore, to ensure you get checksum values for data you + download using Get Job Output API, the range must be tree hash + aligned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Initiate a Job`_ and `Downloading a Vault Inventory`_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. - :type job_data: dict - :param job_data: A Python dictionary containing the - information about the requested job. The dictionary - can contain the following attributes: + :type job_parameters: dict + :param job_parameters: Provides options for specifying job information. + The dictionary can contain the following attributes: * ArchiveId - The ID of the archive you want to retrieve. 
This field is required only if the Type is set to @@ -340,6 +641,12 @@ archive-retrieval|inventory-retrieval * RetrievalByteRange - Optionally specify the range of bytes to retrieve. + * InventoryRetrievalParameters: Optional job parameters + * Format - The output format, like "JSON" + * StartDate - ISO8601 starting date string + * EndDate - ISO8601 ending date string + * Limit - Maximum number of entries + * Marker - A unique string used for pagination """ uri = 'vaults/%s/jobs' % vault_name @@ -353,27 +660,72 @@ def get_job_output(self, vault_name, job_id, byte_range=None): """ This operation downloads the output of the job you initiated - using Initiate a Job. Depending on the job type - you specified when you initiated the job, the output will be - either the content of an archive or a vault inventory. - - You can download all the job output or download a portion of - the output by specifying a byte range. In the case of an - archive retrieval job, depending on the byte range you - specify, Amazon Glacier returns the checksum for the portion - of the data. You can compute the checksum on the client and - verify that the values match to ensure the portion you - downloaded is the correct data. - - :type vault_name: str :param - :param vault_name: The name of the new vault - - :type job_id: str - :param job_id: The ID of the job. - - :type byte_range: tuple - :param range: A tuple of integers specifying the slice (in bytes) - of the archive you want to receive + using InitiateJob. Depending on the job type you specified + when you initiated the job, the output will be either the + content of an archive or a vault inventory. + + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. That is, you can download the job + output within the 24 hours period after Amazon Glacier + completes the job. + + If the job output is large, then you can use the `Range` + request header to retrieve a portion of the output. 
This + allows you to download the entire output in smaller chunks of + bytes. For example, suppose you have 1 GB of job output you + want to download and you decide to download 128 MB chunks of + data at a time, which is a total of eight Get Job Output + requests. You use the following process to download the job + output: + + + #. Download a 128 MB chunk of output by specifying the + appropriate byte range using the `Range` header. + #. Along with the data, the response includes a checksum of + the payload. You compute the checksum of the payload on the + client and compare it with the checksum you received in the + response to ensure you received all the expected data. + #. Repeat steps 1 and 2 for all the eight 128 MB chunks of + output data, each time specifying the appropriate byte range. + #. After downloading all the parts of the job output, you have + a list of eight checksum values. Compute the tree hash of + these values to find the checksum of the entire output. Using + the Describe Job API, obtain job information of the job that + provided you the output. The response includes the checksum of + the entire archive stored in Amazon Glacier. You compare this + value with the checksum you computed to ensure you have + downloaded the entire archive content with no errors. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Downloading a Vault Inventory`_, `Downloading an Archive`_, + and `Get Job Output `_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. 
You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The job ID whose data is downloaded. + + :type byte_range: string + :param byte_range: The range of bytes to retrieve from the output. For + example, if you want to download the first 1,048,576 bytes, specify + "Range: bytes=0-1048575". By default, this operation downloads the + entire output. """ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), ('Content-Range', u'ContentRange'), @@ -392,13 +744,50 @@ def upload_archive(self, vault_name, archive, linear_hash, tree_hash, description=None): """ - This operation adds an archive to a vault. For a successful - upload, your data is durably persisted. In response, Amazon - Glacier returns the archive ID in the x-amz-archive-id header - of the response. You should save the archive ID returned so - that you can access the archive later. + This operation adds an archive to a vault. This is a + synchronous operation, and for a successful upload, your data + is durably persisted. Amazon Glacier returns the archive ID in + the `x-amz-archive-id` header of the response. + + You must use the archive ID to access your data in Amazon + Glacier. After you upload an archive, you should save the + archive ID returned so that you can retrieve or delete the + archive later. Besides saving the archive ID, you can also + index it and give it a friendly name to allow for better + searching. You can also use the optional archive description + field to specify how the archive is referred to in an external + index of archives, such as you might create in Amazon + DynamoDB. You can also get the vault inventory to obtain a + list of archive IDs in a vault. 
For more information, see + InitiateJob. + + You must provide a SHA256 tree hash of the data you are + uploading. For information about computing a SHA256 tree hash, + see `Computing Checksums`_. + + You can optionally specify an archive description of up to + 1,024 printable ASCII characters. You can get the archive + description when you either retrieve the archive or get the + vault inventory. For more information, see InitiateJob. Amazon + Glacier does not interpret the description in any way. An + archive description does not need to be unique. You cannot use + the description to retrieve or sort the archive list. + + Archives are immutable. After you upload an archive, you + cannot edit the archive or its description. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading an Archive in Amazon Glacier`_ and `Upload + Archive`_ in the Amazon Glacier Developer Guide . - :type vault_name: str :param + :type vault_name: str :param vault_name: The name of the vault :type archive: bytes @@ -414,7 +803,8 @@ tree hash, see http://goo.gl/u7chF. :type description: str - :param description: An optional description of the archive. + :param description: The optional description of the archive you + are uploading. """ response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location'), @@ -445,13 +835,39 @@ def delete_archive(self, vault_name, archive_id): """ - This operation deletes an archive from a vault. + This operation deletes an archive from a vault. Subsequent + requests to initiate a retrieval of this archive will fail. 
+ Archive retrievals that are in progress for this archive ID + may or may not succeed according to the following scenarios: + + + + If the archive retrieval job is actively preparing the data + for download when Amazon Glacier receives the delete archive + request, the archival retrieval operation might fail. + + If the archive retrieval job has successfully prepared the + archive for download when Amazon Glacier receives the delete + archive request, you will be able to download the output. + + + This operation is idempotent. Attempting to delete an already- + deleted archive does not result in an error. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_ + in the Amazon Glacier Developer Guide . - :type vault_name: str - :param vault_name: The name of the new vault + :type vault_name: string + :param vault_name: The name of the vault. - :type archive_id: str - :param archive_id: The ID for the archive to be deleted. + :type archive_id: string + :param archive_id: The ID of the archive to delete. """ uri = 'vaults/%s/archives/%s' % (vault_name, archive_id) return self.make_request('DELETE', uri, ok_responses=(204,)) @@ -461,21 +877,65 @@ def initiate_multipart_upload(self, vault_name, part_size, description=None): """ - Initiate a multipart upload. Amazon Glacier creates a - multipart upload resource and returns it's ID. You use this - ID in subsequent multipart upload operations. + This operation initiates a multipart upload. Amazon Glacier + creates a multipart upload resource and returns its ID in the + response. 
The multipart upload ID is used in subsequent + requests to upload parts of an archive (see + UploadMultipartPart). + + When you initiate a multipart upload, you specify the part + size in number of bytes. The part size must be a megabyte + (1024 KB) multiplied by a power of 2-for example, 1048576 (1 + MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so + on. The minimum allowable part size is 1 MB, and the maximum + is 4 GB. + + Every part you upload to this resource (see + UploadMultipartPart), except the last one, must have the same + size. The last one can be the same size or smaller. For + example, suppose you want to upload a 16.2 MB file. If you + initiate the multipart upload with a part size of 4 MB, you + will upload four parts of 4 MB each and one part of 0.2 MB. + + + You don't need to know the size of the archive when you start + a multipart upload because Amazon Glacier does not require you + to specify the overall archive size. + + + After you complete the multipart upload, Amazon Glacier + removes the multipart upload resource referenced by the ID. + Amazon Glacier also removes the multipart upload resource if + you cancel the multipart upload or it may be removed if there + is no activity for a period of 24 hours. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Initiate Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + The part size must be a megabyte (1024 KB) multiplied by a power of + 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), + 8388608 (8 MB), and so on. 
The minimum allowable part size is 1 MB, + and the maximum is 4 GB (4096 MB). :type vault_name: str :param vault_name: The name of the vault. :type description: str - :param description: An optional description of the archive. + :param description: The archive description that you are uploading in + parts. :type part_size: int - :param part_size: The size of each part except the last, in bytes. - The part size must be a multiple of 1024 KB multiplied by - a power of 2. The minimum allowable part size is 1MB and the - maximum is 4GB. + :param part_size: The size of each part except the last, in bytes. The + last part can be smaller than this part size. """ response_headers = [('x-amz-multipart-upload-id', u'UploadId'), ('Location', u'Location')] @@ -491,24 +951,77 @@ def complete_multipart_upload(self, vault_name, upload_id, sha256_treehash, archive_size): """ - Call this to inform Amazon Glacier that all of the archive parts - have been uploaded and Amazon Glacier can now assemble the archive - from the uploaded parts. + You call this operation to inform Amazon Glacier that all the + archive parts have been uploaded and that Amazon Glacier can + now assemble the archive from the uploaded parts. After + assembling and saving the archive to the vault, Amazon Glacier + returns the URI path of the newly created archive resource. + Using the URI path, you can then access the archive. After you + upload an archive, you should save the archive ID returned to + retrieve the archive at a later point. You can also get the + vault inventory to obtain a list of archive IDs in a vault. + For more information, see InitiateJob. + + In the request, you must include the computed SHA256 tree hash + of the entire archive you have uploaded. For information about + computing a SHA256 tree hash, see `Computing Checksums`_. On + the server side, Amazon Glacier also constructs the SHA256 + tree hash of the assembled archive. 
If the values match, + Amazon Glacier saves the archive to the vault; otherwise, it + returns an error, and the operation fails. The ListParts + operation returns a list of parts uploaded for a specific + multipart upload. It includes checksum information for each + uploaded part that can be used to debug a bad checksum issue. + + Additionally, Amazon Glacier also checks for any missing + content ranges when assembling the archive, if missing content + ranges are found, Amazon Glacier returns an error and the + operation fails. + + Complete Multipart Upload is an idempotent operation. After + your first successful complete multipart upload, if you call + the operation again within a short period, the operation will + succeed and return the same archive ID. This is useful in the + event you experience a network issue that causes an aborted + connection or receive a 500 server error, in which case you + can repeat your Complete Multipart Upload request and get the + same archive ID without creating duplicate archives. Note, + however, that after the multipart upload completes, you cannot + call the List Parts operation and the multipart upload will + not appear in List Multipart Uploads response, even if + idempotent complete is possible. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Complete Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + :type checksum: string + :param checksum: The SHA256 tree hash of the entire archive. It is the + tree hash of SHA256 tree hash of the individual parts. 
If the value + you specify in the request does not match the SHA256 tree hash of + the final assembled archive as computed by Amazon Glacier, Amazon + Glacier returns an error and the request fails. :type vault_name: str :param vault_name: The name of the vault. :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :param upload_id: The upload ID of the multipart upload. :type sha256_treehash: str - :param sha256_treehash: The SHA256 tree hash of the entire - archive. It is the tree hash of SHA256 tree hash of the - individual parts. If the value you specify in the request - does not match the SHA256 tree hash of the final assembled - archive as computed by Amazon Glacier, Amazon Glacier - returns an error and the request fails. + :param sha256_treehash: The SHA256 tree hash of the entire archive. + It is the tree hash of SHA256 tree hash of the individual parts. + If the value you specify in the request does not match the SHA256 + tree hash of the final assembled archive as computed by Amazon + Glacier, Amazon Glacier returns an error and the request fails. :type archive_size: int :param archive_size: The total size, in bytes, of the entire @@ -527,37 +1040,90 @@ def abort_multipart_upload(self, vault_name, upload_id): """ - Call this to abort a multipart upload identified by the upload ID. + This operation aborts a multipart upload identified by the + upload ID. - :type vault_name: str + After the Abort Multipart Upload request succeeds, you cannot + upload any more parts to the multipart upload or complete the + multipart upload. Aborting a completed upload fails. However, + aborting an already-aborted upload will succeed, for a short + time. For more information about uploading a part and + completing a multipart upload, see UploadMultipartPart and + CompleteMultipartUpload. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). 
However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `Abort + Multipart Upload`_ in the Amazon Glacier Developer Guide . + + :type vault_name: string :param vault_name: The name of the vault. - :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :type upload_id: string + :param upload_id: The upload ID of the multipart upload to delete. """ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) return self.make_request('DELETE', uri, ok_responses=(204,)) def list_multipart_uploads(self, vault_name, limit=None, marker=None): """ - Lists in-progress multipart uploads for the specified vault. + This operation lists in-progress multipart uploads for the + specified vault. An in-progress multipart upload is a + multipart upload that has been initiated by an + InitiateMultipartUpload request, but has not yet been + completed or aborted. The list returned in the List Multipart + Upload response has no guaranteed order. + + The List Multipart Uploads operation supports pagination. By + default, this operation returns up to 1,000 multipart uploads + in the response. You should always check the response for a + `marker` at which to continue the list; if there are no more + items the `marker` is `null`. To return a list of multipart + uploads that begins at a specific upload, set the `marker` + request parameter to the value you obtained from a previous + List Multipart Upload request. You can also limit the number + of uploads returned in the response by specifying the `limit` + parameter in the request. + + Note the difference between this operation and listing parts + (ListParts). 
The List Multipart Uploads operation lists all + multipart uploads for a vault and does not require a multipart + upload ID. The List Parts operation requires a multipart + upload ID since parts are associated with a single upload. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Multipart + Uploads `_ in the Amazon Glacier Developer Guide . - :type vault_name: str + :type vault_name: string :param vault_name: The name of the vault. - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the operation - returns up to 1,000 items. - - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the item at which the listing should - begin. Get the marker value from a previous - response. You need only include the marker if you are - continuing the pagination of results started in a previous - request. + :type limit: string + :param limit: Specifies the maximum number of uploads returned in the + response body. If this value is not specified, the List Uploads + operation returns up to 1,000 uploads. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the upload at which the listing of uploads should begin. + Get the marker value from a previous List Uploads response. You + need only include the marker if you are continuing the pagination + of results started in a previous List Uploads request. 
""" params = {} if limit: @@ -569,27 +1135,51 @@ def list_parts(self, vault_name, upload_id, limit=None, marker=None): """ - Lists in-progress multipart uploads for the specified vault. + This operation lists the parts of an archive that have been + uploaded in a specific multipart upload. You can make this + request at any time during an in-progress multipart upload + before you complete the upload (see CompleteMultipartUpload. + List Parts returns an error for completed uploads. The list + returned in the List Parts response is sorted by part range. + + The List Parts operation supports pagination. By default, this + operation returns up to 1,000 uploaded parts in the response. + You should always check the response for a `marker` at which + to continue the list; if there are no more items the `marker` + is `null`. To return a list of parts that begins at a specific + part, set the `marker` request parameter to the value you + obtained from a previous List Parts request. You can also + limit the number of parts returned in the response by + specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Parts`_ + in the Amazon Glacier Developer Guide . - :type vault_name: str + :type vault_name: string :param vault_name: The name of the vault. - :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :type upload_id: string + :param upload_id: The upload ID of the multipart upload. - :type limit: int - :param limit: The maximum number of items returned in the - response. 
If you don't specify a value, the operation - returns up to 1,000 items. - - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the item at which the listing should - begin. Get the marker value from a previous - response. You need only include the marker if you are - continuing the pagination of results started in a previous - request. + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the part at which the listing of parts should begin. Get + the marker value from the response of a previous List Parts + response. You need only include the marker if you are continuing + the pagination of results started in a previous List Parts request. + + :type limit: string + :param limit: Specifies the maximum number of parts returned in the + response body. If this value is not specified, the List Parts + operation returns up to 1,000 uploads. """ params = {} if limit: @@ -602,7 +1192,55 @@ def upload_part(self, vault_name, upload_id, linear_hash, tree_hash, byte_range, part_data): """ - Lists in-progress multipart uploads for the specified vault. + This operation uploads a part of an archive. You can upload + archive parts in any order. You can also upload them in + parallel. You can upload up to 10,000 parts for a multipart + upload. + + Amazon Glacier rejects your upload part request if any of the + following conditions is true: + + + + **SHA256 tree hash does not match**To ensure that part data + is not corrupted in transmission, you compute a SHA256 tree + hash of the part and include it in your request. Upon + receiving the part data, Amazon Glacier also computes a SHA256 + tree hash. If these hash values don't match, the operation + fails. For information about computing a SHA256 tree hash, see + `Computing Checksums`_. + + **Part size does not match**The size of each part except the + last must match the size specified in the corresponding + InitiateMultipartUpload request. 
The size of the last part + must be the same size as, or smaller than, the specified size. + If you upload a part whose size is smaller than the part size + you specified in your initiate multipart upload request and + that part is not the last part, then the upload part request + will succeed. However, the subsequent Complete Multipart + Upload request will fail. + + **Range does not align**The byte range value in the request + does not align with the part size specified in the + corresponding initiate request. For example, if you specify a + part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 + MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid + part ranges. However, if you set a range value of 2 MB to 6 + MB, the range does not align with the part size and the upload + will fail. + + + This operation is idempotent. If you upload the same part + multiple times, the data included in the most recent request + overwrites the previously uploaded data. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Upload Part `_ in the Amazon Glacier Developer Guide . :type vault_name: str :param vault_name: The name of the vault. @@ -621,8 +1259,11 @@ operation. :type byte_range: tuple of ints - :param byte_range: Identfies the range of bytes in the assembled - archive that will be uploaded in this part. + :param byte_range: Identifies the range of bytes in the assembled + archive that will be uploaded in this part. Amazon Glacier uses + this information to assemble the archive in the proper sequence. + The format of this header follows RFC 2616. 
An example header is + Content-Range:bytes 0-4194303/*. :type part_data: bytes :param part_data: The data to be uploaded for the part diff -Nru python-boto-2.20.1/boto/glacier/vault.py python-boto-2.29.1/boto/glacier/vault.py --- python-boto-2.20.1/boto/glacier/vault.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/glacier/vault.py 2014-05-30 20:49:34.000000000 +0000 @@ -300,7 +300,9 @@ return self.get_job(response['JobId']) def retrieve_inventory(self, sns_topic=None, - description=None): + description=None, byte_range=None, + start_date=None, end_date=None, + limit=None): """ Initiate a inventory retrieval job to list the items in the vault. You will need to wait for the notification from @@ -315,6 +317,18 @@ sends notification when the job is completed and the output is ready for you to download. + :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + :rtype: str :return: The ID of the job """ @@ -323,6 +337,19 @@ job_data['SNSTopic'] = sns_topic if description is not None: job_data['Description'] = description + if byte_range is not None: + job_data['RetrievalByteRange'] = byte_range + if start_date is not None or end_date is not None or limit is not None: + rparams = {} + + if start_date is not None: + rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if end_date is not None: + rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if limit is not None: + rparams['Limit'] = limit + + job_data['InventoryRetrievalParameters'] = rparams response = self.layer1.initiate_job(self.name, job_data) return response['JobId'] @@ -340,6 +367,18 @@ sends notification when the job is completed and the output is ready for you to download. 
+ :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + :rtype: :class:`boto.glacier.job.Job` :return: A Job object representing the retrieval job. """ diff -Nru python-boto-2.20.1/boto/gs/bucketlistresultset.py python-boto-2.29.1/boto/gs/bucketlistresultset.py --- python-boto-2.20.1/boto/gs/bucketlistresultset.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/gs/bucketlistresultset.py 2014-05-30 20:49:34.000000000 +0000 @@ -38,7 +38,7 @@ generation_marker = rs.next_generation_marker more_results= rs.is_truncated -class VersionedBucketListResultSet: +class VersionedBucketListResultSet(object): """ A resultset for listing versions within a bucket. Uses the bucket_lister generator function and implements the iterator interface. This diff -Nru python-boto-2.20.1/boto/gs/connection.py python-boto-2.29.1/boto/gs/connection.py --- python-boto-2.20.1/boto/gs/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/gs/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,25 +14,25 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-from boto.gs.bucket import Bucket +from boto.gs.bucket import Bucket from boto.s3.connection import S3Connection from boto.s3.connection import SubdomainCallingFormat from boto.s3.connection import check_lowercase_bucketname from boto.utils import get_utf8_value -class Location: +class Location(object): DEFAULT = 'US' EU = 'EU' class GSConnection(S3Connection): DefaultHost = 'storage.googleapis.com' - QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s' + QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s' def __init__(self, gs_access_key_id=None, gs_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, @@ -40,7 +40,7 @@ host=DefaultHost, debug=0, https_connection_factory=None, calling_format=SubdomainCallingFormat(), path='/', suppress_consec_slashes=True): - S3Connection.__init__(self, gs_access_key_id, gs_secret_access_key, + super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, https_connection_factory, calling_format, path, "google", Bucket, @@ -52,12 +52,12 @@ """ Creates a new bucket. By default it's located in the USA. You can pass Location.EU to create bucket in the EU. You can also pass - a LocationConstraint for where the bucket should be located, and + a LocationConstraint for where the bucket should be located, and a StorageClass describing how the data should be stored. :type bucket_name: string :param bucket_name: The name of the new bucket. - + :type headers: dict :param headers: Additional headers to pass along with the request to GCS. @@ -70,7 +70,7 @@ :type storage_class: string :param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'. - + """ check_lowercase_bucketname(bucket_name) @@ -103,3 +103,27 @@ raise self.provider.storage_response_error( response.status, response.reason, body) + def get_bucket(self, bucket_name, validate=True, headers=None): + """ + Retrieves a bucket by name. 
+ + If the bucket does not exist, an ``S3ResponseError`` will be raised. If + you are unsure if the bucket exists or not, you can use the + ``S3Connection.lookup`` method, which will either return a valid bucket + or ``None``. + + :type bucket_name: string + :param bucket_name: The name of the bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to + AWS. + + :type validate: boolean + :param validate: If ``True``, it will try to fetch all keys within the + given bucket. (Default: ``True``) + """ + bucket = self.bucket_class(self, bucket_name) + if validate: + bucket.get_all_keys(headers, maxkeys=0) + return bucket diff -Nru python-boto-2.20.1/boto/gs/key.py python-boto-2.29.1/boto/gs/key.py --- python-boto-2.20.1/boto/gs/key.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/gs/key.py 2014-05-30 20:49:34.000000000 +0000 @@ -109,6 +109,9 @@ self.metageneration = resp.getheader('x-goog-metageneration', None) self.generation = resp.getheader('x-goog-generation', None) + def handle_restore_headers(self, response): + return + def handle_addl_headers(self, headers): for key, value in headers: if key == 'x-goog-hash': @@ -219,7 +222,7 @@ with the stored object in the response. See http://goo.gl/sMkcC for details. """ - if self.bucket != None: + if self.bucket is not None: if res_download_handler: res_download_handler.get_file(self, fp, headers, cb, num_cb, torrent=torrent, @@ -407,19 +410,20 @@ contents. :type fp: file - :param fp: the file whose contents are to be uploaded + :param fp: The file whose contents are to be uploaded. :type headers: dict - :param headers: additional HTTP headers to be sent with the PUT request. + :param headers: (optional) Additional HTTP headers to be sent with the + PUT request. :type replace: bool - :param replace: If this parameter is False, the method will first check - to see if an object exists in the bucket with the same key. If it - does, it won't overwrite it. 
The default value is True which will - overwrite the object. + :param replace: (optional) If this parameter is False, the method will + first check to see if an object exists in the bucket with the same + key. If it does, it won't overwrite it. The default value is True + which will overwrite the object. :type cb: function - :param cb: a callback function that will be called to report + :param cb: (optional) Callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the @@ -432,43 +436,44 @@ during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` - :param policy: A canned ACL policy that will be applied to the new key - in GS. + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. - :type md5: A tuple containing the hexdigest version of the MD5 checksum - of the file as the first element and the Base64-encoded version of - the plain checksum as the second element. This is the same format - returned by the compute_md5 method. - :param md5: If you need to compute the MD5 for any reason prior to - upload, it's silly to have to do it twice so this param, if present, - will be used as the MD5 values of the file. Otherwise, the checksum - will be computed. - - :type res_upload_handler: ResumableUploadHandler - :param res_upload_handler: If provided, this handler will perform the - upload. + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. 
+ + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. + + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. :type size: int - :param size: (optional) The Maximum number of bytes to read from - the file pointer (fp). This is useful when uploading - a file in multiple parts where you are splitting the - file up into different ranges to be uploaded. If not - specified, the default behaviour is to read all bytes - from the file pointer. Less bytes may be available. + :param size: (optional) The Maximum number of bytes to read from the + file pointer (fp). This is useful when uploading a file in multiple + parts where you are splitting the file up into different ranges to + be uploaded. If not specified, the default behaviour is to read all + bytes from the file pointer. Less bytes may be available. + Notes: - 1. The "size" parameter currently cannot be used when - a resumable upload handler is given but is still - useful for uploading part of a file as implemented - by the parent class. - 2. At present Google Cloud Storage does not support - multipart uploads. + 1. The "size" parameter currently cannot be used when a + resumable upload handler is given but is still useful for + uploading part of a file as implemented by the parent class. + 2. At present Google Cloud Storage does not support multipart + uploads. :type rewind: bool - :param rewind: (optional) If True, the file pointer (fp) will be - rewound to the start before any bytes are read from - it. The default behaviour is False which reads from - the current position of the file pointer (fp). + :param rewind: (optional) If True, the file pointer (fp) will be + rewound to the start before any bytes are read from it. 
The default + behaviour is False which reads from the current position of the + file pointer (fp). :type if_generation: int :param if_generation: (optional) If set to a generation number, the @@ -528,7 +533,7 @@ if hasattr(fp, 'name'): self.path = fp.name - if self.bucket != None: + if self.bucket is not None: if isinstance(fp, KeyFile): # Avoid EOF seek for KeyFile case as it's very inefficient. key = fp.getkey() @@ -552,12 +557,12 @@ fp.seek(spos) size = self.size - if md5 == None: + if md5 is None: md5 = self.compute_md5(fp, size) self.md5 = md5[0] self.base64md5 = md5[1] - if self.name == None: + if self.name is None: self.name = self.md5 if not replace: @@ -585,44 +590,47 @@ parameters. :type filename: string - :param filename: The name of the file that you want to put onto GS + :param filename: The name of the file that you want to put onto GS. :type headers: dict - :param headers: Additional headers to pass along with the request to GS. + :param headers: (optional) Additional headers to pass along with the + request to GS. :type replace: bool - :param replace: If True, replaces the contents of the file if it - already exists. + :param replace: (optional) If True, replaces the contents of the file + if it already exists. :type cb: function - :param cb: (optional) a callback function that will be called to report - progress on the download. The callback should accept two integer + :param cb: (optional) Callback function that will be called to report + progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have - been successfully transmitted from GS and the second representing - the total number of bytes that need to be transmitted. + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. 
- :type cb: int + :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. - :type policy: :class:`boto.gs.acl.CannedACLStrings` - :param policy: A canned ACL policy that will be applied to the new key - in GS. - - :type md5: A tuple containing the hexdigest version of the MD5 checksum - of the file as the first element and the Base64-encoded version of - the plain checksum as the second element. This is the same format - returned by the compute_md5 method. - :param md5: If you need to compute the MD5 for any reason prior to - upload, it's silly to have to do it twice so this param, if present, - will be used as the MD5 values of the file. Otherwise, the checksum - will be computed. - - :type res_upload_handler: ResumableUploadHandler - :param res_upload_handler: If provided, this handler will perform the - upload. + :type policy: :py:attribute:`boto.gs.acl.CannedACLStrings` + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. + + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. + + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. + + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. 
:type if_generation: int :param if_generation: (optional) If set to a generation number, the @@ -792,7 +800,7 @@ the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: self.bucket.set_acl(acl_or_str, self.name, headers=headers, generation=generation, if_generation=if_generation, @@ -809,7 +817,7 @@ :rtype: :class:`.gs.acl.ACL` """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_acl(self.name, headers=headers, generation=generation) @@ -824,7 +832,7 @@ :rtype: str """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_xml_acl(self.name, headers=headers, generation=generation) @@ -852,7 +860,7 @@ the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, @@ -883,7 +891,7 @@ the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_canned_acl( acl_str, self.name, diff -Nru python-boto-2.20.1/boto/gs/resumable_upload_handler.py python-boto-2.29.1/boto/gs/resumable_upload_handler.py --- python-boto-2.20.1/boto/gs/resumable_upload_handler.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/gs/resumable_upload_handler.py 2014-05-30 20:49:34.000000000 +0000 @@ -102,13 +102,13 @@ # Ignore non-existent file (happens first time an upload # is attempted on a file), but warn user for other errors. if e.errno != errno.ENOENT: - # Will restart because self.tracker_uri == None. + # Will restart because self.tracker_uri is None. print('Couldn\'t read URI tracker file (%s): %s. Restarting ' 'upload from scratch.' 
% (self.tracker_file_name, e.strerror)) except InvalidUriError, e: # Warn user, but proceed (will restart because - # self.tracker_uri == None). + # self.tracker_uri is None). print('Invalid tracker URI (%s) found in URI tracker file ' '(%s). Restarting upload from scratch.' % (uri, self.tracker_file_name)) @@ -124,8 +124,9 @@ return f = None try: - f = open(self.tracker_file_name, 'w') - f.write(self.tracker_uri) + with os.fdopen(os.open(self.tracker_file_name, + os.O_WRONLY | os.O_CREAT, 0600), 'w') as f: + f.write(self.tracker_uri) except IOError, e: raise ResumableUploadException( 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen' @@ -134,9 +135,6 @@ 'unwritable directory)' % (self.tracker_file_name, e.strerror), ResumableTransferDisposition.ABORT) - finally: - if f: - f.close() def _set_tracker_uri(self, uri): """ diff -Nru python-boto-2.20.1/boto/gs/user.py python-boto-2.29.1/boto/gs/user.py --- python-boto-2.20.1/boto/gs/user.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/gs/user.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # IN THE SOFTWARE. 
-class User: +class User(object): def __init__(self, parent=None, id='', name=''): if parent: parent.owner = self diff -Nru python-boto-2.20.1/boto/handler.py python-boto-2.29.1/boto/handler.py --- python-boto-2.20.1/boto/handler.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/handler.py 2014-05-30 20:49:34.000000000 +0000 @@ -32,7 +32,7 @@ def startElement(self, name, attrs): self.current_text = '' new_node = self.nodes[-1][1].startElement(name, attrs, self.connection) - if new_node != None: + if new_node is not None: self.nodes.append((name, new_node)) def endElement(self, name): diff -Nru python-boto-2.20.1/boto/https_connection.py python-boto-2.29.1/boto/https_connection.py --- python-boto-2.20.1/boto/https_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/https_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -109,8 +109,12 @@ if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) sock.connect((self.host, self.port)) - boto.log.debug("wrapping ssl socket; CA certificate file=%s", - self.ca_certs) + msg = "wrapping ssl socket; " + if self.ca_certs: + msg += "CA certificate file=%s" %self.ca_certs + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, cert_reqs=ssl.CERT_REQUIRED, diff -Nru python-boto-2.20.1/boto/iam/connection.py python-boto-2.29.1/boto/iam/connection.py --- python-boto-2.20.1/boto/iam/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/iam/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -26,11 +26,32 @@ from boto.iam.summarymap import SummaryMap from boto.connection import AWSQueryConnection - -ASSUME_ROLE_POLICY_DOCUMENT = json.dumps({ - 'Statement': [{'Principal': {'Service': ['ec2.amazonaws.com']}, - 'Effect': 'Allow', - 'Action': ['sts:AssumeRole']}]}) +DEFAULT_POLICY_DOCUMENTS = { + 'default': { + 
'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, + 'amazonaws.com.cn': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com.cn'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, +} +# For backward-compatibility, we'll preserve this here. +ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default']) class IAMConnection(AWSQueryConnection): @@ -40,18 +61,18 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', - debug=0, https_connection_factory=None, - path='/', security_token=None, validate_certs=True): - AWSQueryConnection.__init__(self, aws_access_key_id, + debug=0, https_connection_factory=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + super(IAMConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): - #return ['iam'] return ['hmac-v4'] def get_response(self, action, params, path='/', parent=None, @@ -1006,13 +1027,35 @@ :param service: Default service to go to in the console. """ alias = self.get_account_alias() + if not alias: raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.') + resp = alias.get('list_account_aliases_response', {}) + result = resp.get('list_account_aliases_result', {}) + aliases = result.get('account_aliases', []) + + if not len(aliases): + raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.') + + # We'll just use the first one we find. 
+ alias = aliases[0] + if self.host == 'iam.us-gov.amazonaws.com': - return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (alias, service) + return "https://%s.signin.amazonaws-us-gov.com/console/%s" % ( + alias, + service + ) + elif self.host.endswith('amazonaws.com.cn'): + return "https://%s.signin.amazonaws.cn/console/%s" % ( + alias, + service + ) else: - return "https://%s.signin.aws.amazon.com/console/%s" % (alias, service) + return "https://%s.signin.aws.amazon.com/console/%s" % ( + alias, + service + ) def get_account_summary(self): """ @@ -1059,6 +1102,30 @@ params['Path'] = path return self.get_response('CreateInstanceProfile', params) + def _build_policy(self, assume_role_policy_document=None): + if assume_role_policy_document is not None: + if isinstance(assume_role_policy_document, basestring): + # Historically, they had to pass a string. If it's a string, + # assume the user has already handled it. + return assume_role_policy_document + else: + + for tld, policy in DEFAULT_POLICY_DOCUMENTS.items(): + if tld is 'default': + # Skip the default. We'll fall back to it if we don't find + # anything. + continue + + if self.host and self.host.endswith(tld): + assume_role_policy_document = policy + break + + if not assume_role_policy_document: + assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default'] + + # Dump the policy (either user-supplied ``dict`` or one of the defaults) + return json.dumps(assume_role_policy_document) + def create_role(self, role_name, assume_role_policy_document=None, path=None): """ Creates a new role for your AWS account. @@ -1070,21 +1137,19 @@ :type role_name: string :param role_name: Name of the role to create. - :type assume_role_policy_document: string + :type assume_role_policy_document: ``string`` or ``dict`` :param assume_role_policy_document: The policy that grants an entity permission to assume the role. :type path: string :param path: The path to the instance profile. 
""" - params = {'RoleName': role_name} - if assume_role_policy_document is None: - # This is the only valid assume_role_policy_document currently, so - # this is used as a default value if no assume_role_policy_document - # is provided. - params['AssumeRolePolicyDocument'] = ASSUME_ROLE_POLICY_DOCUMENT - else: - params['AssumeRolePolicyDocument'] = assume_role_policy_document + params = { + 'RoleName': role_name, + 'AssumeRolePolicyDocument': self._build_policy( + assume_role_policy_document + ), + } if path is not None: params['Path'] = path return self.get_response('CreateRole', params) diff -Nru python-boto-2.20.1/boto/iam/__init__.py python-boto-2.29.1/boto/iam/__init__.py --- python-boto-2.20.1/boto/iam/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/iam/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -22,8 +22,8 @@ # this is here for backward compatibility # originally, the IAMConnection class was defined here -from connection import IAMConnection -from boto.regioninfo import RegionInfo +from boto.iam.connection import IAMConnection +from boto.regioninfo import RegionInfo, get_regions class IAMRegionInfo(RegionInfo): @@ -50,13 +50,22 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [IAMRegionInfo(name='universal', - endpoint='iam.amazonaws.com', - connection_cls=IAMConnection), - IAMRegionInfo(name='us-gov-west-1', - endpoint='iam.us-gov.amazonaws.com', - connection_cls=IAMConnection) - ] + regions = get_regions( + 'iam', + region_cls=IAMRegionInfo, + connection_cls=IAMConnection + ) + + # For historical reasons, we had a "universal" endpoint as well. 
+ regions.append( + IAMRegionInfo( + name='universal', + endpoint='iam.amazonaws.com', + connection_cls=IAMConnection + ) + ) + + return regions def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/__init__.py python-boto-2.29.1/boto/__init__.py --- python-boto-2.20.1/boto/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -27,6 +27,7 @@ from boto.pyami.config import Config, BotoConfigLocations from boto.storage_uri import BucketStorageUri, FileStorageUri import boto.plugin +import datetime import os import platform import re @@ -36,9 +37,12 @@ import urlparse from boto.exception import InvalidUriError -__version__ = '2.20.1' +__version__ = '2.29.1' Version = __version__ # for backware compatibility +# http://bugs.python.org/issue7980 +datetime.datetime.strptime('', '') + UserAgent = 'Boto/%s Python/%s %s/%s' % ( __version__, platform.python_version(), @@ -54,6 +58,7 @@ GENERATION_RE = re.compile(r'(?P.+)' r'#(?P[0-9]+)$') VERSION_RE = re.compile('(?P.+)#(?P.+)$') +ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json') def init_logging(): @@ -191,6 +196,11 @@ :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection` :return: A connection to Amazon's Auto Scaling Service + + :type use_block_device_types bool + :param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing + block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatability + with the old incorrect style. 
""" from boto.ec2.autoscale import AutoScaleConnection return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, @@ -307,6 +317,25 @@ return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) +def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.rds2.layer1.RDSConnection` + :return: A connection to RDS + """ + from boto.rds2.layer1 import RDSConnection + return RDSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string @@ -624,13 +653,31 @@ :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key - :rtype: :class:`boto.ec2.autoscale.CloudSearchConnection` + :rtype: :class:`boto.cloudsearch.layer2.Layer2` :return: A connection to Amazon's CloudSearch service """ from boto.cloudsearch.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_cloudsearch2(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearch2.layer2.Layer2` + :return: A connection to Amazon's CloudSearch2 service + """ + from boto.cloudsearch2.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + **kwargs) + def connect_beanstalk(aws_access_key_id=None, aws_secret_access_key=None, diff -Nru python-boto-2.20.1/boto/jsonresponse.py python-boto-2.29.1/boto/jsonresponse.py --- python-boto-2.20.1/boto/jsonresponse.py 2013-12-13 
20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/jsonresponse.py 2014-05-30 20:49:34.000000000 +0000 @@ -33,7 +33,7 @@ def startElement(self, name, attrs): self.current_text = '' t = self.nodes[-1][1].startElement(name, attrs, self.connection) - if t != None: + if t is not None: if isinstance(t, tuple): self.nodes.append(t) else: diff -Nru python-boto-2.20.1/boto/kinesis/__init__.py python-boto-2.29.1/boto/kinesis/__init__.py --- python-boto-2.20.1/boto/kinesis/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/kinesis/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,11 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.kinesis.layer1 import KinesisConnection - - return [RegionInfo(name='us-east-1', - endpoint='kinesis.us-east-1.amazonaws.com', - connection_cls=KinesisConnection), - ] + return get_regions('kinesis', connection_cls=KinesisConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/kinesis/layer1.py python-boto-2.29.1/boto/kinesis/layer1.py --- python-boto-2.20.1/boto/kinesis/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/kinesis/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -65,7 +65,7 @@ self.DefaultRegionEndpoint) if 'host' not in kwargs: kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(KinesisConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): @@ -532,11 +532,10 @@ placed and the sequence number that was assigned to the data record. - The `SequenceNumberForOrdering` sets the initial sequence - number for the partition key. 
Later `PutRecord` requests to - the same partition key (from the same client) will - automatically increase from `SequenceNumberForOrdering`, - ensuring strict sequential ordering. + Sequence numbers generally increase over time. To guarantee + strictly increasing ordering, use the + `SequenceNumberForOrdering` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. If a `PutRecord` request cannot be processed because of insufficient provisioned throughput on the shard involved in @@ -550,8 +549,10 @@ :param stream_name: The name of the stream to put the data record into. :type data: blob - :param data: The data blob to put into the record, which will be Base64 - encoded. The maximum size of the data blob is 50 kilobytes (KB). + :param data: The data blob to put into the record, which is + Base64-encoded when the blob is serialized. + The maximum size of the data blob (the payload after + Base64-decoding) is 50 kilobytes (KB) Set `b64_encode` to disable automatic Base64 encoding. :type partition_key: string @@ -571,10 +572,12 @@ partition key hash. :type sequence_number_for_ordering: string - :param sequence_number_for_ordering: The sequence number to use as the - initial number for the partition key. Subsequent calls to - `PutRecord` from the same client and for the same partition key - will increase from the `SequenceNumberForOrdering` value. + :param sequence_number_for_ordering: Guarantees strictly increasing + sequence numbers, for puts from the same client and to the same + partition key. Usage: set the `SequenceNumberForOrdering` of record + n to the sequence number of record n-1 (as returned in the + PutRecordResult when putting record n-1 ). If this parameter is not + set, records will be coarsely ordered based on arrival time. :type b64_encode: boolean :param b64_encode: Whether to Base64 encode `data`. 
Can be set to diff -Nru python-boto-2.20.1/boto/manage/cmdshell.py python-boto-2.29.1/boto/manage/cmdshell.py --- python-boto-2.20.1/boto/manage/cmdshell.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/manage/cmdshell.py 2014-05-30 20:49:34.000000000 +0000 @@ -118,7 +118,7 @@ def run(self, command): """ Execute a command on the remote host. Return a tuple containing - an integer status and a two strings, the first containing stdout + an integer status and two strings, the first containing stdout and the second containing stderr from the command. """ boto.log.debug('running:%s on %s' % (command, self.server.instance_id)) @@ -182,7 +182,7 @@ log_fp = StringIO.StringIO() process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - while process.poll() == None: + while process.poll() is None: time.sleep(1) t = process.communicate() log_fp.write(t[0]) diff -Nru python-boto-2.20.1/boto/manage/server.py python-boto-2.29.1/boto/manage/server.py --- python-boto-2.20.1/boto/manage/server.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/manage/server.py 2014-05-30 20:49:34.000000000 +0000 @@ -15,7 +15,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -134,10 +134,10 @@ if ami.location.find('pyami') >= 0: my_amis.append((ami.location, ami)) return my_amis - + def get_region(self, params): region = params.get('region', None) - if isinstance(region, str) or isinstance(region, unicode): + if isinstance(region, basestring): region = boto.ec2.get_region(region) params['region'] = region if not region: @@ -171,7 +171,7 @@ prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', choices=self.ec2.get_all_zones) params['zone'] = propget.get(prop) - + def get_ami_id(self, params): valid = False while not valid: @@ -189,7 +189,7 @@ def get_group(self, params): group = params.get('group', None) - if isinstance(group, str) or isinstance(group, unicode): + if isinstance(group, basestring): group_list = self.ec2.get_all_security_groups() for g in group_list: if g.name == group: @@ -202,7 +202,7 @@ def get_key(self, params): keypair = params.get('keypair', None) - if isinstance(keypair, str) or isinstance(keypair, unicode): + if isinstance(keypair, basestring): key_list = self.ec2.get_all_key_pairs() for k in key_list: if k.name == keypair: @@ -271,20 +271,20 @@ """ Create a new instance based on the specified configuration file or the specified configuration and the passed in parameters. - - If the config_file argument is not None, the configuration is read from there. + + If the config_file argument is not None, the configuration is read from there. Otherwise, the cfg argument is used. - + The config file may include other config files with a #import reference. The included - config files must reside in the same directory as the specified file. - - The logical_volume argument, if supplied, will be used to get the current physical - volume ID and use that as an override of the value specified in the config file. This - may be useful for debugging purposes when you want to debug with a production config - file but a test Volume. 
- - The dictionary argument may be used to override any EC2 configuration values in the - config file. + config files must reside in the same directory as the specified file. + + The logical_volume argument, if supplied, will be used to get the current physical + volume ID and use that as an override of the value specified in the config file. This + may be useful for debugging purposes when you want to debug with a production config + file but a test Volume. + + The dictionary argument may be used to override any EC2 configuration values in the + config file. """ if config_file: cfg = Config(path=config_file) @@ -304,7 +304,7 @@ zone = params.get('zone') # deal with possibly passed in logical volume: if logical_volume != None: - cfg.set('EBS', 'logical_volume_name', logical_volume.name) + cfg.set('EBS', 'logical_volume_name', logical_volume.name) cfg_fp = StringIO.StringIO() cfg.write(cfg_fp) # deal with the possibility that zone and/or keypair are strings read from the config file: @@ -323,12 +323,12 @@ i = 0 elastic_ip = params.get('elastic_ip') instances = reservation.instances - if elastic_ip != None and instances.__len__() > 0: + if elastic_ip is not None and instances.__len__() > 0: instance = instances[0] print 'Waiting for instance to start so we can set its elastic IP address...' # Sometimes we get a message from ec2 that says that the instance does not exist. 
# Hopefully the following delay will giv eec2 enough time to get to a stable state: - time.sleep(5) + time.sleep(5) while instance.update() != 'running': time.sleep(1) instance.use_ip(elastic_ip) @@ -346,7 +346,7 @@ l.append(s) i += 1 return l - + @classmethod def create_from_instance_id(cls, instance_id, name, description=''): regions = boto.ec2.regions() @@ -393,9 +393,9 @@ s.put() servers.append(s) return servers - + def __init__(self, id=None, **kw): - Model.__init__(self, id, **kw) + super(Server, self).__init__(id, **kw) self.ssh_key_file = None self.ec2 = None self._cmdshell = None @@ -421,7 +421,7 @@ self._instance = instance except EC2ResponseError: pass - + def _status(self): status = '' if self._instance: @@ -484,14 +484,14 @@ return kn def put(self): - Model.put(self) + super(Server, self).put() self._setup_ec2() def delete(self): if self.production: raise ValueError("Can't delete a production server") #self.stop() - Model.delete(self) + super(Server, self).delete() def stop(self): if self.production: @@ -553,4 +553,4 @@ return self.run('apt-get -y install %s' % pkg) - + diff -Nru python-boto-2.20.1/boto/manage/task.py python-boto-2.29.1/boto/manage/task.py --- python-boto-2.20.1/boto/manage/task.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/manage/task.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -30,7 +30,7 @@ return if int(val) < 0 or int(val) > 23: raise ValueError - + class Task(Model): """ @@ -40,10 +40,10 @@ To keep the operation reasonably efficient and not cause excessive polling, the minimum granularity of a Task is hourly. Some examples: - + hour='*' - the task would be executed each hour hour='3' - the task would be executed at 3AM GMT each day. - + """ name = StringProperty() hour = StringProperty(required=True, validator=check_hour, default='*') @@ -57,13 +57,13 @@ def start_all(cls, queue_name): for task in cls.all(): task.start(queue_name) - + def __init__(self, id=None, **kw): - Model.__init__(self, id, **kw) + super(Task, self).__init__(id, **kw) self.hourly = self.hour == '*' self.daily = self.hour != '*' self.now = datetime.datetime.utcnow() - + def check(self): """ Determine how long until the next scheduled time for a Task. @@ -76,7 +76,7 @@ if self.hourly and not self.last_executed: return 0 - + if self.daily and not self.last_executed: if int(self.hour) == self.now.hour: return 0 @@ -97,7 +97,7 @@ return 82800 # 23 hours, just to be safe else: return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 - + def _run(self, msg, vtimeout): boto.log.info('Task[%s] - running:%s' % (self.name, self.command)) log_fp = StringIO.StringIO() @@ -105,7 +105,7 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) nsecs = 5 current_timeout = vtimeout - while process.poll() == None: + while process.poll() is None: boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout)) if nsecs >= current_timeout: current_timeout += vtimeout @@ -170,6 +170,6 @@ - - + + diff -Nru python-boto-2.20.1/boto/manage/volume.py python-boto-2.29.1/boto/manage/volume.py --- python-boto-2.20.1/boto/manage/volume.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/manage/volume.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING 
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -33,7 +33,7 @@ class CommandLineGetter(object): - + def get_region(self, params): if not params.get('region', None): prop = self.cls.find_property('region_name') @@ -44,7 +44,7 @@ prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', choices=self.ec2.get_all_zones) params['zone'] = propget.get(prop) - + def get_name(self, params): if not params.get('name', None): prop = self.cls.find_property('name') @@ -136,7 +136,7 @@ if size < self.size: size = self.size ec2 = self.get_ec2_connection() - if self.zone_name == None or self.zone_name == '': + if self.zone_name is None or self.zone_name == '': # deal with the migration case where the zone is not set in the logical volume: current_volume = ec2.get_all_volumes([self.volume_id])[0] self.zone_name = current_volume.zone @@ -151,11 +151,11 @@ v.zone_name = self.zone_name v.put() return v - + def get_ec2_connection(self): if self.server: return self.server.ec2 - if not hasattr(self, 'ec2') or self.ec2 == None: + if not hasattr(self, 'ec2') or self.ec2 is None: self.ec2 = boto.ec2.connect_to_region(self.region_name) return self.ec2 @@ -209,7 +209,7 @@ def detach(self, force=False): state = self.attachment_state - if state == 'available' or state == None or state == 'detaching': + if state == 'available' or state is None or state == 'detaching': print 'already detached' return None ec2 = self.get_ec2_connection() @@ -218,7 +218,7 @@ self.put() def checkfs(self, use_cmd=None): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run 
this command') # detemine state of file system on volume, only works if attached if use_cmd: @@ -233,7 +233,7 @@ return True def wait(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') with closing(self.server.get_cmdshell()) as cmd: # wait for the volume device to appear @@ -243,7 +243,7 @@ time.sleep(10) def format(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') status = None with closing(self.server.get_cmdshell()) as cmd: @@ -253,7 +253,7 @@ return status def mount(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') boto.log.info('handle_mount_point') with closing(self.server.get_cmdshell()) as cmd: @@ -302,7 +302,7 @@ # we need to freeze the XFS file system try: self.freeze() - if self.server == None: + if self.server is None: snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) else: snapshot = self.server.ec2.create_snapshot(self.volume_id) @@ -396,7 +396,7 @@ boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name)) snap.delete() return snaps - + def grow(self, size): pass @@ -411,10 +411,10 @@ self.detach() ec2 = self.get_ec2_connection() ec2.delete_volume(self.volume_id) - Model.delete(self) + super(Volume, self).delete() def archive(self): # snapshot volume, trim snaps, delete volume-id pass - + diff -Nru python-boto-2.20.1/boto/mashups/order.py python-boto-2.29.1/boto/mashups/order.py --- python-boto-2.20.1/boto/mashups/order.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mashups/order.py 2014-05-30 20:49:34.000000000 +0000 @@ -179,7 +179,7 @@ item.ami.id, item.groups, item.key.name) def place(self, block=True): - if get_domain() == None: + if get_domain() is None: print 'SDB Persistence Domain not set' domain_name = self.get_string('Specify SDB Domain') 
set_domain(domain_name) diff -Nru python-boto-2.20.1/boto/mashups/server.py python-boto-2.29.1/boto/mashups/server.py --- python-boto-2.20.1/boto/mashups/server.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mashups/server.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -87,7 +87,7 @@ return s def __init__(self, id=None, **kw): - Model.__init__(self, id, **kw) + super(Server, self).__init__(id, **kw) self._reservation = None self._instance = None self._ssh_client = None @@ -123,13 +123,13 @@ return self._instance instance = property(getInstance, setReadOnly, None, 'The Instance for the server') - + def getAMI(self): if self.instance: return self.instance.image_id ami = property(getAMI, setReadOnly, None, 'The AMI for the server') - + def getStatus(self): if self.instance: self.instance.update() @@ -137,7 +137,7 @@ status = property(getStatus, setReadOnly, None, 'The status of the server') - + def getHostname(self): if self.instance: return self.instance.public_dns_name diff -Nru python-boto-2.20.1/boto/mturk/connection.py python-boto-2.29.1/boto/mturk/connection.py --- python-boto-2.20.1/boto/mturk/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -46,7 +46,8 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=None, debug=0, - https_connection_factory=None): + 
https_connection_factory=None, security_token=None, + profile_name=None): if not host: if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True': host = 'mechanicalturk.sandbox.amazonaws.com' @@ -54,11 +55,13 @@ host = 'mechanicalturk.amazonaws.com' self.debug = debug - AWSQueryConnection.__init__(self, aws_access_key_id, + super(MTurkConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, - https_connection_factory) + https_connection_factory, + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): return ['mturk'] @@ -385,15 +388,15 @@ The number of assignments on the page in the filtered results list, equivalent to the number of assignments being returned by this call. - A non-negative integer + A non-negative integer, as a string. PageNumber The number of the page in the filtered results list being returned. - A positive integer + A positive integer, as a string. TotalNumResults The total number of HITs in the filtered results list based on this call. - A non-negative integer + A non-negative integer, as a string. 
The ResultSet will contain zero or more Assignment objects @@ -875,7 +878,7 @@ return duration -class BaseAutoResultElement: +class BaseAutoResultElement(object): """ Base class to automatically add attributes when parsing XML """ @@ -955,7 +958,7 @@ """ def __init__(self, connection): - BaseAutoResultElement.__init__(self, connection) + super(QualificationRequest, self).__init__(connection) self.answers = [] def endElement(self, name, value, connection): @@ -967,7 +970,7 @@ xml.sax.parseString(value, h) self.answers.append(answer_rs) else: - BaseAutoResultElement.endElement(self, name, value, connection) + super(QualificationRequest, self).endElement(name, value, connection) class Assignment(BaseAutoResultElement): @@ -980,7 +983,7 @@ """ def __init__(self, connection): - BaseAutoResultElement.__init__(self, connection) + super(Assignment, self).__init__(connection) self.answers = [] def endElement(self, name, value, connection): @@ -992,7 +995,7 @@ xml.sax.parseString(value, h) self.answers.append(answer_rs) else: - BaseAutoResultElement.endElement(self, name, value, connection) + super(Assignment, self).endElement(name, value, connection) class QuestionFormAnswer(BaseAutoResultElement): @@ -1016,7 +1019,7 @@ """ def __init__(self, connection): - BaseAutoResultElement.__init__(self, connection) + super(QuestionFormAnswer, self).__init__(connection) self.fields = [] self.qid = None diff -Nru python-boto-2.20.1/boto/mturk/layoutparam.py python-boto-2.29.1/boto/mturk/layoutparam.py --- python-boto-2.20.1/boto/mturk/layoutparam.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/layoutparam.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,15 +14,15 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -class LayoutParameters: +class LayoutParameters(object): def __init__(self, layoutParameters=None): - if layoutParameters == None: + if layoutParameters is None: layoutParameters = [] self.layoutParameters = layoutParameters @@ -46,7 +46,7 @@ def __init__(self, name, value): self.name = name self.value = value - + def get_as_params(self): params = { "Name": self.name, diff -Nru python-boto-2.20.1/boto/mturk/notification.py python-boto-2.29.1/boto/mturk/notification.py --- python-boto-2.20.1/boto/mturk/notification.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/notification.py 2014-05-30 20:49:34.000000000 +0000 @@ -32,7 +32,7 @@ import base64 import re -class NotificationMessage: +class NotificationMessage(object): NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl" NOTIFICATION_VERSION = '2006-05-05' @@ -88,7 +88,7 @@ signature_calc = base64.b64encode(h.digest()) return self.signature == signature_calc -class Event: +class Event(object): def __init__(self, d): self.event_type = d['EventType'] self.event_time_str = d['EventTime'] diff -Nru python-boto-2.20.1/boto/mturk/price.py python-boto-2.29.1/boto/mturk/price.py --- python-boto-2.20.1/boto/mturk/price.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/price.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,12 +14,12 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -class Price: +class Price(object): def __init__(self, amount=0.0, currency_code='USD'): self.amount = amount diff -Nru python-boto-2.20.1/boto/mturk/qualification.py python-boto-2.29.1/boto/mturk/qualification.py --- python-boto-2.20.1/boto/mturk/qualification.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/qualification.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,15 +14,15 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-class Qualifications: +class Qualifications(object): def __init__(self, requirements=None): - if requirements == None: + if requirements is None: requirements = [] self.requirements = requirements @@ -49,7 +49,7 @@ self.comparator = comparator self.integer_value = integer_value self.required_to_preview = required_to_preview - + def get_as_params(self): params = { "QualificationTypeId": self.qualification_type_id, @@ -67,7 +67,7 @@ """ def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(PercentAssignmentsSubmittedRequirement, self).__init__(qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsAbandonedRequirement(Requirement): """ @@ -75,7 +75,7 @@ """ def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(PercentAssignmentsAbandonedRequirement, self).__init__(qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsReturnedRequirement(Requirement): """ @@ -83,7 +83,7 @@ """ def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(PercentAssignmentsReturnedRequirement, self).__init__(qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsApprovedRequirement(Requirement): 
""" @@ -91,7 +91,7 @@ """ def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(PercentAssignmentsApprovedRequirement, self).__init__(qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsRejectedRequirement(Requirement): """ @@ -99,15 +99,15 @@ """ def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(PercentAssignmentsRejectedRequirement, self).__init__(qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class NumberHitsApprovedRequirement(Requirement): """ Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0. 
""" - + def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(NumberHitsApprovedRequirement, self).__init__(qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class LocaleRequirement(Requirement): """ @@ -115,7 +115,7 @@ """ def __init__(self, comparator, locale, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) + super(LocaleRequirement, self).__init__(qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) self.locale = locale def get_as_params(self): @@ -132,6 +132,6 @@ """ Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default). 
""" - + def __init__(self, comparator, integer_value, required_to_preview=False): - Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + super(AdultRequirement, self).__init__(qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) diff -Nru python-boto-2.20.1/boto/mturk/question.py python-boto-2.29.1/boto/mturk/question.py --- python-boto-2.20.1/boto/mturk/question.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mturk/question.py 2014-05-30 20:49:34.000000000 +0000 @@ -82,12 +82,12 @@ return self.template % vars(self) -class XMLTemplate: +class XMLTemplate(object): def get_as_xml(self): return self.template % vars(self) -class SimpleField(object, XMLTemplate): +class SimpleField(XMLTemplate): """ A Simple name/value pair that can be easily rendered as XML. @@ -101,7 +101,7 @@ self.value = value -class Binary(object, XMLTemplate): +class Binary(XMLTemplate): template = """%(type)s%(subtype)s%(url)s%(alt_text)s""" def __init__(self, type, subtype, url, alt_text): @@ -179,7 +179,7 @@ super(Flash, self).get_inner_content(content) -class FormattedContent(object, XMLTemplate): +class FormattedContent(XMLTemplate): schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd' template = '' diff -Nru python-boto-2.20.1/boto/mws/connection.py python-boto-2.29.1/boto/mws/connection.py --- python-boto-2.20.1/boto/mws/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mws/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated 
documentation files (the @@ -22,26 +22,37 @@ import hashlib import base64 import string +import collections from boto.connection import AWSQueryConnection -from boto.mws.exception import ResponseErrorFactory -from boto.mws.response import ResponseFactory, ResponseElement -from boto.handler import XmlHandler +from boto.exception import BotoServerError +import boto.mws.exception import boto.mws.response +from boto.handler import XmlHandler __all__ = ['MWSConnection'] api_version_path = { - 'Feeds': ('2009-01-01', 'Merchant', '/'), - 'Reports': ('2009-01-01', 'Merchant', '/'), - 'Orders': ('2011-01-01', 'SellerId', '/Orders/2011-01-01'), - 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), - 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), - 'Inbound': ('2010-10-01', 'SellerId', - '/FulfillmentInboundShipment/2010-10-01'), - 'Outbound': ('2010-10-01', 'SellerId', - '/FulfillmentOutboundShipment/2010-10-01'), - 'Inventory': ('2010-10-01', 'SellerId', - '/FulfillmentInventory/2010-10-01'), + 'Feeds': ('2009-01-01', 'Merchant', '/'), + 'Reports': ('2009-01-01', 'Merchant', '/'), + 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), + 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), + 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), + 'Inbound': ('2010-10-01', 'SellerId', + '/FulfillmentInboundShipment/2010-10-01'), + 'Outbound': ('2010-10-01', 'SellerId', + '/FulfillmentOutboundShipment/2010-10-01'), + 'Inventory': ('2010-10-01', 'SellerId', + '/FulfillmentInventory/2010-10-01'), + 'Recommendations': ('2013-04-01', 'SellerId', + '/Recommendations/2013-04-01'), + 'CustomerInfo': ('2014-03-01', 'SellerId', + '/CustomerInformation/2014-03-01'), + 'CartInfo': ('2014-03-01', 'SellerId', + '/CartInformation/2014-03-01'), + 'Subscriptions': ('2013-07-01', 'SellerId', + '/Subscriptions/2013-07-01'), + 'OffAmazonPayments': ('2013-01-01', 'SellerId', + '/OffAmazonPayments/2013-01-01'), } content_md5 = lambda c: 
base64.encodestring(hashlib.md5(c).digest()).strip() decorated_attrs = ('action', 'response', 'section', @@ -78,7 +89,7 @@ def decorator(func): def wrapper(*args, **kw): - if filter(lambda x: not x in kw, (field, 'content_type')): + if any([f not in kw for f in (field, 'content_type')]): message = "{0} requires {1} and content_type arguments for " \ "building HTTP body".format(func.action, field) raise KeyError(message) @@ -94,32 +105,40 @@ return decorator -def destructure_object(value, into={}, prefix=''): - if isinstance(value, ResponseElement): - for name, attr in value.__dict__.items(): +def destructure_object(value, into, prefix, members=False): + if isinstance(value, boto.mws.response.ResponseElement): + destructure_object(value.__dict__, into, prefix, members=members) + elif isinstance(value, collections.Mapping): + for name in value: if name.startswith('_'): continue - destructure_object(attr, into=into, prefix=prefix + '.' + name) - elif filter(lambda x: isinstance(value, x), (list, set, tuple)): - for index, element in [(prefix + '.' + str(i + 1), value[i]) - for i in range(len(value))]: - destructure_object(element, into=into, prefix=index) + destructure_object(value[name], into, prefix + '.' + name, + members=members) + elif isinstance(value, basestring): + into[prefix] = value + elif isinstance(value, collections.Iterable): + for index, element in enumerate(value): + suffix = (members and '.member.' 
or '.') + str(index + 1) + destructure_object(element, into, prefix + suffix, + members=members) elif isinstance(value, bool): into[prefix] = str(value).lower() else: into[prefix] = value -def structured_objects(*fields): +def structured_objects(*fields, **kwargs): def decorator(func): def wrapper(*args, **kw): + members = kwargs.get('members', False) for field in filter(kw.has_key, fields): - destructure_object(kw.pop(field), into=kw, prefix=field) + destructure_object(kw.pop(field), kw, field, members=members) return func(*args, **kw) - wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__, - ', '.join(fields)) + wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \ + "(ResponseElement or anything iterable/dict-like)" \ + .format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -203,7 +222,7 @@ def decorator(func): def wrapper(*args, **kw): - for field in filter(lambda x: isinstance(kw.get(x), bool), fields): + for field in [f for f in fields if isinstance(kw.get(f), bool)]: kw[field] = str(kw[field]).lower() return func(*args, **kw) wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, @@ -217,11 +236,6 @@ def decorator(func, quota=int(quota), restore=float(restore)): version, accesskey, path = api_version_path[section] action = ''.join(api or map(str.capitalize, func.func_name.split('_'))) - if hasattr(boto.mws.response, action + 'Response'): - response = getattr(boto.mws.response, action + 'Response') - else: - response = ResponseFactory(action) - response._action = action def wrapper(self, *args, **kw): kw.setdefault(accesskey, getattr(self, accesskey, None)) @@ -232,7 +246,9 @@ raise KeyError(message) kw['Action'] = action kw['Version'] = version - return func(self, path, response, *args, **kw) + response = self._response_factory(action, connection=self) + request = dict(path=path, quota=quota, restore=restore) + return func(self, request, response, *args, **kw) for attr in decorated_attrs: setattr(wrapper, 
attr, locals().get(attr)) wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ @@ -245,46 +261,77 @@ class MWSConnection(AWSQueryConnection): - ResponseError = ResponseErrorFactory + ResponseFactory = boto.mws.response.ResponseFactory + ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory def __init__(self, *args, **kw): kw.setdefault('host', 'mws.amazonservices.com') + self._sandboxed = kw.pop('sandbox', False) self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') self.SellerId = kw.pop('SellerId', None) or self.Merchant - AWSQueryConnection.__init__(self, *args, **kw) + kw = self._setup_factories(kw.pop('factory_scopes', []), **kw) + super(MWSConnection, self).__init__(*args, **kw) + + def _setup_factories(self, extrascopes, **kw): + for factory, (scope, Default) in { + 'response_factory': + (boto.mws.response, self.ResponseFactory), + 'response_error_factory': + (boto.mws.exception, self.ResponseErrorFactory), + }.items(): + if factory in kw: + setattr(self, '_' + factory, kw.pop(factory)) + else: + scopes = extrascopes + [scope] + setattr(self, '_' + factory, Default(scopes=scopes)) + return kw + + def _sandboxify(self, path): + if not self._sandboxed: + return path + splat = path.split('/') + splat[-2] += '_Sandbox' + return splat.join('/') def _required_auth_capability(self): return ['mws'] - def post_request(self, path, params, cls, body='', headers={}, isXML=True): + def _post_request(self, request, params, parser, body='', headers=None): """Make a POST request, optionally with a content body, and return the response, optionally as raw text. - Modelled off of the inherited get_object/make_request flow. 
""" + headers = headers or {} + path = self._sandboxify(request['path']) request = self.build_base_http_request('POST', path, None, data=body, params=params, headers=headers, host=self.host) - response = self._mexe(request, override_num_retries=None) + try: + response = self._mexe(request, override_num_retries=None) + except BotoServerError, bs: + raise self._response_error_factor(bs.status, bs.reason, bs.body) body = response.read() boto.log.debug(body) if not body: boto.log.error('Null body %s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise self._response_error_factory(response.status, + response.reason, body) if response.status != 200: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) - if not isXML: - digest = response.getheader('Content-MD5') + raise self._response_error_factory(response.status, + response.reason, body) + digest = response.getheader('Content-MD5') + if digest is not None: assert content_md5(body) == digest - return body - return self._parse_response(cls, body) + contenttype = response.getheader('Content-Type') + return self._parse_response(parser, contenttype, body) - def _parse_response(self, cls, body): - obj = cls(self) - h = XmlHandler(obj, self) - xml.sax.parseString(body, h) - return obj + def _parse_response(self, parser, contenttype, body): + if not contenttype.startswith('text/xml'): + return body + handler = XmlHandler(parser, self) + xml.sax.parseString(body, handler) + return parser def method_for(self, name): """Return the MWS API method referred to in the argument. @@ -321,49 +368,50 @@ @structured_lists('MarketplaceIdList.Id') @requires(['FeedType']) @api_action('Feeds', 15, 120) - def submit_feed(self, path, response, headers={}, body='', **kw): + def submit_feed(self, request, response, headers=None, body='', **kw): """Uploads a feed for processing by Amazon MWS. 
""" - return self.post_request(path, kw, response, body=body, - headers=headers) + headers = headers or {} + return self._post_request(request, kw, response, body=body, + headers=headers) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) - def get_feed_submission_list(self, path, response, **kw): + def get_feed_submission_list(self, request, response, **kw): """Returns a list of all feed submissions submitted in the previous 90 days. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Feeds', 0, 0) - def get_feed_submission_list_by_next_token(self, path, response, **kw): + def get_feed_submission_list_by_next_token(self, request, response, **kw): """Returns a list of feed submissions using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) - def get_feed_submission_count(self, path, response, **kw): + def get_feed_submission_count(self, request, response, **kw): """Returns a count of the feeds submitted in the previous 90 days. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type') @api_action('Feeds', 10, 45) - def cancel_feed_submissions(self, path, response, **kw): + def cancel_feed_submissions(self, request, response, **kw): """Cancels one or more feed submissions and returns a count of the feed submissions that were canceled. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['FeedSubmissionId']) @api_action('Feeds', 15, 60) - def get_feed_submission_result(self, path, response, **kw): + def get_feed_submission_result(self, request, response, **kw): """Returns the feed processing report. """ - return self.post_request(path, kw, response, isXML=False) + return self._post_request(request, kw, response) def get_service_status(self, **kw): """Instruct the user on how to get service status. @@ -378,230 +426,230 @@ @boolean_arguments('ReportOptions=ShowSalesChannel') @requires(['ReportType']) @api_action('Reports', 15, 60) - def request_report(self, path, response, **kw): + def request_report(self, request, response, **kw): """Creates a report request and submits the request to Amazon MWS. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) - def get_report_request_list(self, path, response, **kw): + def get_report_request_list(self, request, response, **kw): """Returns a list of report requests that you can use to get the ReportRequestId for a report. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_request_list_by_next_token(self, path, response, **kw): + def get_report_request_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportRequestListByNextToken or GetReportRequestList, where the value of HasNext was true in that previous request. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) - def get_report_request_count(self, path, response, **kw): + def get_report_request_count(self, request, response, **kw): """Returns a count of report requests that have been submitted to Amazon MWS for processing. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Reports', 10, 45) - def cancel_report_requests(self, path, response, **kw): + def cancel_report_requests(self, request, response, **kw): """Cancel one or more report requests, returning the count of the canceled report requests and the report request information. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') @api_action('Reports', 10, 60) - def get_report_list(self, path, response, **kw): + def get_report_list(self, request, response, **kw): """Returns a list of reports that were created in the previous 90 days that match the query parameters. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_list_by_next_token(self, path, response, **kw): + def get_report_list_by_next_token(self, request, response, **kw): """Returns a list of reports using the NextToken, which was supplied by a previous request to either GetReportListByNextToken or GetReportList, where the value of HasNext was true in the previous call. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_count(self, path, response, **kw): + def get_report_count(self, request, response, **kw): """Returns a count of the reports, created in the previous 90 days, with a status of _DONE_ and that are available for download. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ReportId']) @api_action('Reports', 15, 60) - def get_report(self, path, response, **kw): + def get_report(self, request, response, **kw): """Returns the contents of a report. """ - return self.post_request(path, kw, response, isXML=False) + return self._post_request(request, kw, response) @requires(['ReportType', 'Schedule']) @api_action('Reports', 10, 45) - def manage_report_schedule(self, path, response, **kw): + def manage_report_schedule(self, request, response, **kw): """Creates, updates, or deletes a report request schedule for a specified report type. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_schedule_list(self, path, response, **kw): + def get_report_schedule_list(self, request, response, **kw): """Returns a list of order report requests that are scheduled to be submitted to Amazon MWS for processing. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_schedule_list_by_next_token(self, path, response, **kw): + def get_report_schedule_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportScheduleListByNextToken or GetReportScheduleList, where the value of HasNext was true in that previous request. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_schedule_count(self, path, response, **kw): + def get_report_schedule_count(self, request, response, **kw): """Returns a count of order report requests that are scheduled to be submitted to Amazon MWS. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @requires(['ReportIdList']) @structured_lists('ReportIdList.Id') @api_action('Reports', 10, 45) - def update_report_acknowledgements(self, path, response, **kw): + def update_report_acknowledgements(self, request, response, **kw): """Updates the acknowledged status of one or more reports. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') @api_action('Inbound', 30, 0.5) - def create_inbound_shipment_plan(self, path, response, **kw): + def create_inbound_shipment_plan(self, request, response, **kw): """Returns the information required to create an inbound shipment. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) - def create_inbound_shipment(self, path, response, **kw): + def create_inbound_shipment(self, request, response, **kw): """Creates an inbound shipment. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) - def update_inbound_shipment(self, path, response, **kw): + def update_inbound_shipment(self, request, response, **kw): """Updates an existing inbound shipment. Amazon documentation is ambiguous as to whether the InboundShipmentHeader and InboundShipmentItems arguments are required. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires_some_of('ShipmentIdList', 'ShipmentStatusList') @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') @api_action('Inbound', 30, 0.5) - def list_inbound_shipments(self, path, response, **kw): + def list_inbound_shipments(self, request, response, **kw): """Returns a list of inbound shipments based on criteria that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipments_by_next_token(self, path, response, **kw): + def list_inbound_shipments_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipments using the NextToken parameter. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipment_items(self, path, response, **kw): + def list_inbound_shipment_items(self, request, response, **kw): """Returns a list of items in a specified inbound shipment, or a list of items that were updated within a specified time frame. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipment_items_by_next_token(self, path, response, **kw): + def list_inbound_shipment_items_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Inbound', 2, 300, 'GetServiceStatus') - def get_inbound_service_status(self, path, response, **kw): + def get_inbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inbound Shipment API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerSkus'], ['QueryStartDateTime']) @structured_lists('SellerSkus.member') @api_action('Inventory', 30, 0.5) - def list_inventory_supply(self, path, response, **kw): + def list_inventory_supply(self, request, response, **kw): """Returns information about the availability of a seller's inventory. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inventory', 30, 0.5) - def list_inventory_supply_by_next_token(self, path, response, **kw): + def list_inventory_supply_by_next_token(self, request, response, **kw): """Returns the next page of information about the availability of a seller's inventory using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Inventory', 2, 300, 'GetServiceStatus') - def get_inventory_service_status(self, path, response, **kw): + def get_inventory_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inventory API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['PackageNumber']) @api_action('Outbound', 30, 0.5) - def get_package_tracking_details(self, path, response, **kw): + def get_package_tracking_details(self, request, response, **kw): """Returns delivery tracking information for a package in an outbound shipment for a Multi-Channel Fulfillment order. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_objects('Address', 'Items') @requires(['Address', 'Items']) @api_action('Outbound', 30, 0.5) - def get_fulfillment_preview(self, path, response, **kw): + def get_fulfillment_preview(self, request, response, **kw): """Returns a list of fulfillment order previews based on items and shipping speed categories that you specify. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_objects('DestinationAddress', 'Items') @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId', @@ -609,49 +657,49 @@ 'DestinationAddress', 'DisplayableOrderComment', 'Items']) @api_action('Outbound', 30, 0.5) - def create_fulfillment_order(self, path, response, **kw): + def create_fulfillment_order(self, request, response, **kw): """Requests that Amazon ship items from the seller's inventory to a destination address. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) - def get_fulfillment_order(self, path, response, **kw): + def get_fulfillment_order(self, request, response, **kw): """Returns a fulfillment order based on a specified SellerFulfillmentOrderId. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Outbound', 30, 0.5) - def list_all_fulfillment_orders(self, path, response, **kw): + def list_all_fulfillment_orders(self, request, response, **kw): """Returns a list of fulfillment orders fulfilled after (or at) a specified date or by fulfillment method. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Outbound', 30, 0.5) - def list_all_fulfillment_orders_by_next_token(self, path, response, **kw): + def list_all_fulfillment_orders_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) - def cancel_fulfillment_order(self, path, response, **kw): + def cancel_fulfillment_order(self, request, response, **kw): """Requests that Amazon stop attempting to fulfill an existing fulfillment order. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Outbound', 2, 300, 'GetServiceStatus') - def get_outbound_service_status(self, path, response, **kw): + def get_outbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Outbound API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['CreatedAfter'], ['LastUpdatedAfter']) @exclusive(['CreatedAfter'], ['LastUpdatedAfter']) @@ -665,7 +713,7 @@ @structured_lists('MarketplaceId.Id', 'OrderStatus.Status', 'FulfillmentChannel.Channel', 'PaymentMethod.') @api_action('Orders', 6, 60) - def list_orders(self, path, response, **kw): + def list_orders(self, request, response, **kw): """Returns a list of orders created or updated during a time frame that you specify. """ @@ -680,145 +728,440 @@ message = "Don't include {0} when specifying " \ "{1}".format(' or '.join(dont), do) raise AssertionError(message) - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 6, 60) - def list_orders_by_next_token(self, path, response, **kw): + def list_orders_by_next_token(self, request, response, **kw): """Returns the next page of orders using the NextToken value that was returned by your previous request to either ListOrders or ListOrdersByNextToken. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @structured_lists('AmazonOrderId.Id') @api_action('Orders', 6, 60) - def get_order(self, path, response, **kw): + def get_order(self, request, response, **kw): """Returns an order for each AmazonOrderId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @api_action('Orders', 30, 2) - def list_order_items(self, path, response, **kw): + def list_order_items(self, request, response, **kw): """Returns order item information for an AmazonOrderId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 30, 2) - def list_order_items_by_next_token(self, path, response, **kw): + def list_order_items_by_next_token(self, request, response, **kw): """Returns the next page of order items using the NextToken value that was returned by your previous request to either ListOrderItems or ListOrderItemsByNextToken. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Orders', 2, 300, 'GetServiceStatus') - def get_orders_service_status(self, path, response, **kw): + def get_orders_service_status(self, request, response, **kw): """Returns the operational status of the Orders API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Query']) @api_action('Products', 20, 20) - def list_matching_products(self, path, response, **kw): + def list_matching_products(self, request, response, **kw): """Returns a list of products and their attributes, ordered by relevancy, based on a search query that you specify. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 20) - def get_matching_product(self, path, response, **kw): + def get_matching_product(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of ASIN values that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'IdType', 'IdList']) @structured_lists('IdList.Id') @api_action('Products', 20, 20) - def get_matching_product_for_id(self, path, response, **kw): + def get_matching_product_for_id(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of Product IDs that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') - def get_competitive_pricing_for_sku(self, path, response, **kw): + def get_competitive_pricing_for_sku(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the SellerSKUs and MarketplaceId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') - def get_competitive_pricing_for_asin(self, path, response, **kw): + def get_competitive_pricing_for_asin(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the ASINs and MarketplaceId that you specify. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') - def get_lowest_offer_listings_for_sku(self, path, response, **kw): + def get_lowest_offer_listings_for_sku(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and SellerSKUs. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') - def get_lowest_offer_listings_for_asin(self, path, response, **kw): + def get_lowest_offer_listings_for_asin(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and ASINs. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKU']) @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') - def get_product_categories_for_sku(self, path, response, **kw): + def get_product_categories_for_sku(self, request, response, **kw): """Returns the product categories that a SellerSKU belongs to. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASIN']) @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') - def get_product_categories_for_asin(self, path, response, **kw): + def get_product_categories_for_asin(self, request, response, **kw): """Returns the product categories that an ASIN belongs to. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Products', 2, 300, 'GetServiceStatus') - def get_products_service_status(self, path, response, **kw): + def get_products_service_status(self, request, response, **kw): """Returns the operational status of the Products API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetMyPriceForSKU') + def get_my_price_for_sku(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on SellerSKU. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetMyPriceForASIN') + def get_my_price_for_asin(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on ASIN. + """ + return self._post_request(request, kw, response) @api_action('Sellers', 15, 60) - def list_marketplace_participations(self, path, response, **kw): + def list_marketplace_participations(self, request, response, **kw): """Returns a list of marketplaces that the seller submitting the request can sell in, and a list of participations that include seller-specific information in that marketplace. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Sellers', 15, 60) - def list_marketplace_participations_by_next_token(self, path, response, + def list_marketplace_participations_by_next_token(self, request, response, **kw): """Returns the next page of marketplaces and participations using the NextToken value that was returned by your previous request to either ListMarketplaceParticipations or ListMarketplaceParticipationsByNextToken. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Recommendations', 5, 2) + def get_last_updated_time_for_recommendations(self, request, response, + **kw): + """Checks whether there are active recommendations for each category + for the given marketplace, and if there are, returns the time when + recommendations were last updated for each category. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @structured_lists('CategoryQueryList.CategoryQuery') + @api_action('Recommendations', 5, 2) + def list_recommendations(self, request, response, **kw): + """Returns your active recommendations for a specific category or for + all categories for a specific marketplace. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Recommendations', 5, 2) + def list_recommendations_by_next_token(self, request, response, **kw): + """Returns the next page of recommendations using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @api_action('Recommendations', 2, 300, 'GetServiceStatus') + def get_recommendations_service_status(self, request, response, **kw): + """Returns the operational status of the Recommendations API section. 
+ """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 15, 12) + def list_customers(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CustomerInfo', 50, 3) + def list_customers_by_next_token(self, request, response, **kw): + """Returns the next page of customers using the NextToken parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CustomerIdList']) + @structured_lists('CustomerIdList.CustomerId') + @api_action('CustomerInfo', 15, 12) + def get_customers_for_customer_id(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 2, 300, 'GetServiceStatus') + def get_customerinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Customer Information API + section. + """ + return self._post_request(request, kw, response) + + @requires(['DateRangeStart']) + @api_action('CartInfo', 15, 12) + def list_carts(self, request, response, **kw): + """Returns a list of shopping carts in your Webstore that were last + updated during the time range that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CartInfo', 50, 3) + def list_carts_by_next_token(self, request, response, **kw): + """Returns the next page of shopping carts using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CartIdList']) + @structured_lists('CartIdList.CartId') + @api_action('CartInfo', 15, 12) + def get_carts(self, request, response, **kw): + """Returns shopping carts based on the CartId values that you specify. 
+ """ + return self._post_request(request, kw, response) + + @api_action('CartInfo', 2, 300, 'GetServiceStatus') + def get_cartinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Cart Information API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def register_destination(self, request, response, **kw): + """Specifies a new destination where you want to receive notifications. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def deregister_destination(self, request, response, **kw): + """Removes an existing destination from the list of registered + destinations. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_registered_destinations(self, request, response, **kw): + """Lists all current destinations that you have registered. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def send_test_notification_to_destination(self, request, response, **kw): + """Sends a test notification to an existing destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def create_subscription(self, request, response, **kw): + """Creates a new subscription for the specified notification type + and destination. 
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def get_subscription(self, request, response, **kw): + """Gets the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def delete_subscription(self, request, response, **kw): + """Deletes the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_subscriptions(self, request, response, **kw): + """Returns a list of all your current subscriptions. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def update_subscription(self, request, response, **kw): + """Updates the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @api_action('Subscriptions', 2, 300, 'GetServiceStatus') + def get_subscriptions_service_status(self, request, response, **kw): + """Returns the operational status of the Subscriptions API section. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) + @structured_objects('OrderReferenceAttributes') + @api_action('OffAmazonPayments', 10, 1) + def set_order_reference_details(self, request, response, **kw): + """Sets order reference details such as the order total and a + description for the order. 
+ """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 20, 2) + def get_order_reference_details(self, request, response, **kw): + """Returns details about the Order Reference object and its current + state. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def confirm_order_reference(self, request, response, **kw): + """Confirms that the order reference is free of constraints and all + required information has been set on the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def cancel_order_reference(self, request, response, **kw): + """Cancel an order reference; all authorizations associated with + this order reference are also closed. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def close_order_reference(self, request, response, **kw): + """Confirms that an order reference has been fulfilled (fully + or partially) and that you do not expect to create any new + authorizations on this order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', + 'AuthorizationAmount']) + @structured_objects('AuthorizationAmount') + @api_action('OffAmazonPayments', 10, 1) + def authorize(self, request, response, **kw): + """Reserves a specified amount against the payment method(s) stored in + the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 20, 2) + def get_authorization_details(self, request, response, **kw): + """Returns the status of a particular authorization and the total + amount captured on the authorization. 
+ """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) + @structured_objects('CaptureAmount') + @api_action('OffAmazonPayments', 10, 1) + def capture(self, request, response, **kw): + """Captures funds from an authorized payment instrument. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId']) + @api_action('OffAmazonPayments', 20, 2) + def get_capture_details(self, request, response, **kw): + """Returns the status of a particular capture and the total amount + refunded on the capture. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 10, 1) + def close_authorization(self, request, response, **kw): + """Closes an authorization. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount']) + @structured_objects('RefundAmount') + @api_action('OffAmazonPayments', 10, 1) + def refund(self, request, response, **kw): + """Refunds a previously captured amount. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonRefundId']) + @api_action('OffAmazonPayments', 20, 2) + def get_refund_details(self, request, response, **kw): + """Returns the status of a particular refund. + """ + return self._post_request(request, kw, response) + + @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus') + def get_offamazonpayments_service_status(self, request, response, **kw): + """Returns the operational status of the Off-Amazon Payments API + section. 
+ """ + return self._post_request(request, kw, response) diff -Nru python-boto-2.20.1/boto/mws/exception.py python-boto-2.29.1/boto/mws/exception.py --- python-boto-2.20.1/boto/mws/exception.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mws/exception.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -19,19 +19,16 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.exception import BotoServerError +from boto.mws.response import ResponseFactory -class ResponseErrorFactory(BotoServerError): +class ResponseErrorFactory(ResponseFactory): - def __new__(cls, *args, **kw): - error = BotoServerError(*args, **kw) - try: - newclass = globals()[error.error_code] - except KeyError: - newclass = ResponseError - obj = newclass.__new__(newclass, *args, **kw) - obj.__dict__.update(error.__dict__) - return obj + def __call__(self, status, reason, body=None): + server = BotoServerError(status, reason, body=body) + supplied = self.find_element(server.error_code, '', ResponseError) + print supplied.__name__ + return supplied(status, reason, body=body) class ResponseError(BotoServerError): @@ -41,16 +38,14 @@ retry = False def __repr__(self): - return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__, - self.status, self.reason, - self.error_message) + return '{0.__name__}({1.reason}: "{1.message}")' \ + .format(self.__class__, self) def __str__(self): - return 'MWS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ - '{2}\n' \ - '{0.error_message}'.format(self, - self.retry and '(Retriable)' or '', - self.__doc__.strip()) + doc = self.__doc__ and self.__doc__.strip() + "\n" or '' + return '{1.__name__}: {0.reason} {2}\n{3}' \ + 
'{0.message}'.format(self, self.__class__, + self.retry and '(Retriable)' or '', doc) class RetriableResponseError(ResponseError): diff -Nru python-boto-2.20.1/boto/mws/response.py python-boto-2.29.1/boto/mws/response.py --- python-boto-2.20.1/boto/mws/response.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/mws/response.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,23 +1,21 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the fol- lowing conditions: # -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. # -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from decimal import Decimal @@ -62,10 +60,10 @@ setattr(self._parent, self._name, self._clone) def start(self, *args, **kw): - raise NotImplemented + raise NotImplementedError def end(self, *args, **kw): - raise NotImplemented + raise NotImplementedError def teardown(self, *args, **kw): setattr(self._parent, self._name, self._value) @@ -82,7 +80,7 @@ class SimpleList(DeclarativeType): def __init__(self, *args, **kw): - DeclarativeType.__init__(self, *args, **kw) + super(SimpleList, self).__init__(*args, **kw) self._value = [] def start(self, *args, **kw): @@ -108,16 +106,16 @@ assert 'member' not in kw, message if _member is None: if _hint is None: - Element.__init__(self, *args, member=ElementList(**kw)) + super(MemberList, self).__init__(*args, member=ElementList(**kw)) else: - Element.__init__(self, _hint=_hint) + super(MemberList, self).__init__(_hint=_hint) else: if _hint is None: if issubclass(_member, DeclarativeType): member = _member(**kw) else: member = ElementList(_member, **kw) - Element.__init__(self, *args, member=member) + super(MemberList, self).__init__(*args, member=member) else: message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__, _hint) @@ -130,17 +128,43 @@ if isinstance(self._value.member, DeclarativeType): self._value.member = [] 
self._value = self._value.member - Element.teardown(self, *args, **kw) + super(MemberList, self).teardown(*args, **kw) -def ResponseFactory(action, force=None): - result = force or globals().get(action + 'Result', ResponseElement) - - class MWSResponse(Response): - _name = action + 'Response' - - setattr(MWSResponse, action + 'Result', Element(result)) - return MWSResponse +class ResponseFactory(object): + def __init__(self, scopes=None): + self.scopes = [] if scopes is None else scopes + + def element_factory(self, name, parent): + class DynamicElement(parent): + _name = name + setattr(DynamicElement, '__name__', str(name)) + return DynamicElement + + def search_scopes(self, key): + for scope in self.scopes: + if hasattr(scope, key): + return getattr(scope, key) + if hasattr(scope, '__getitem__'): + if key in scope: + return scope[key] + + def find_element(self, action, suffix, parent): + element = self.search_scopes(action + suffix) + if element is not None: + return element + if action.endswith('ByNextToken'): + element = self.search_scopes(action[:-len('ByNextToken')] + suffix) + if element is not None: + return self.element_factory(action + suffix, element) + return self.element_factory(action + suffix, parent) + + def __call__(self, action, connection=None): + response = self.find_element(action, 'Response', Response) + if not hasattr(response, action + 'Result'): + result = self.find_element(action, 'Result', ResponseElement) + setattr(response, action + 'Result', Element(result)) + return response(connection=connection) def strip_namespace(func): @@ -191,8 +215,6 @@ name = self.__class__.__name__ if name.startswith('JIT_'): name = '^{0}^'.format(self._name or '') - elif name == 'MWSResponse': - name = '^{0}^'.format(self._name or name) return '{0}{1!r}({2})'.format( name, self.copy(), ', '.join(map(render, attrs))) @@ -231,7 +253,7 @@ if name == self._name: self.update(attrs) else: - return ResponseElement.startElement(self, name, attrs, connection) + 
return super(Response, self).startElement(name, attrs, connection) @property def _result(self): @@ -247,7 +269,7 @@ def __init__(self, *args, **kw): setattr(self, self._action + 'Result', ElementList(self._ResultClass)) - Response.__init__(self, *args, **kw) + super(ResponseResultList, self).__init__(*args, **kw) class FeedSubmissionInfo(ResponseElement): @@ -262,10 +284,6 @@ FeedSubmissionInfo = ElementList(FeedSubmissionInfo) -class GetFeedSubmissionListByNextTokenResult(GetFeedSubmissionListResult): - pass - - class GetFeedSubmissionCountResult(ResponseElement): pass @@ -290,10 +308,6 @@ ReportRequestInfo = ElementList() -class GetReportRequestListByNextTokenResult(GetReportRequestListResult): - pass - - class CancelReportRequestsResult(RequestReportResult): pass @@ -302,10 +316,6 @@ ReportInfo = ElementList() -class GetReportListByNextTokenResult(GetReportListResult): - pass - - class ManageReportScheduleResult(ResponseElement): ReportSchedule = Element() @@ -314,10 +324,6 @@ pass -class GetReportScheduleListByNextTokenResult(GetReportScheduleListResult): - pass - - class UpdateReportAcknowledgementsResult(GetReportListResult): pass @@ -331,18 +337,10 @@ ShipmentData = MemberList(ShipFromAddress=Element()) -class ListInboundShipmentsByNextTokenResult(ListInboundShipmentsResult): - pass - - class ListInboundShipmentItemsResult(ResponseElement): ItemData = MemberList() -class ListInboundShipmentItemsByNextTokenResult(ListInboundShipmentItemsResult): - pass - - class ListInventorySupplyResult(ResponseElement): InventorySupplyList = MemberList( EarliestAvailability=Element(), @@ -353,10 +351,6 @@ ) -class ListInventorySupplyByNextTokenResult(ListInventorySupplyResult): - pass - - class ComplexAmount(ResponseElement): _amount = 'Value' @@ -374,13 +368,13 @@ if name not in ('CurrencyCode', self._amount): message = 'Unrecognized tag {0} in ComplexAmount'.format(name) raise AssertionError(message) - return ResponseElement.startElement(self, name, attrs, connection) + 
return super(ComplexAmount, self).startElement(name, attrs, connection) @strip_namespace def endElement(self, name, value, connection): if name == self._amount: value = Decimal(value) - ResponseElement.endElement(self, name, value, connection) + super(ComplexAmount, self).endElement(name, value, connection) class ComplexMoney(ComplexAmount): @@ -402,13 +396,13 @@ if name not in ('Unit', 'Value'): message = 'Unrecognized tag {0} in ComplexWeight'.format(name) raise AssertionError(message) - return ResponseElement.startElement(self, name, attrs, connection) + return super(ComplexWeight, self).startElement(name, attrs, connection) @strip_namespace def endElement(self, name, value, connection): if name == 'Value': value = Decimal(value) - ResponseElement.endElement(self, name, value, connection) + super(ComplexWeight, self).endElement(name, value, connection) class Dimension(ComplexType): @@ -472,10 +466,6 @@ FulfillmentOrders = MemberList(FulfillmentOrder) -class ListAllFulfillmentOrdersByNextTokenResult(ListAllFulfillmentOrdersResult): - pass - - class GetPackageTrackingDetailsResult(ResponseElement): ShipToAddress = Element() TrackingEvents = MemberList(EventAddress=Element()) @@ -501,7 +491,7 @@ 'MediaType', 'OperatingSystem', 'Platform') for name in names: setattr(self, name, SimpleList()) - AttributeSet.__init__(self, *args, **kw) + super(ItemAttributes, self).__init__(*args, **kw) class VariationRelationship(ResponseElement): @@ -541,6 +531,11 @@ Price = Element(Price) +class Offer(ResponseElement): + BuyingPrice = Element(Price) + RegularPrice = Element(ComplexMoney) + + class Product(ResponseElement): _namespace = 'ns2' Identifiers = Element(MarketplaceASIN=Element(), @@ -558,6 +553,9 @@ LowestOfferListings = Element( LowestOfferListing=ElementList(LowestOfferListing), ) + Offers = Element( + Offer=ElementList(Offer), + ) class ListMatchingProductsResult(ResponseElement): @@ -601,15 +599,23 @@ pass +class 
GetMyPriceForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForASINResponse(ProductsBulkOperationResponse): + pass + + class ProductCategory(ResponseElement): def __init__(self, *args, **kw): setattr(self, 'Parent', Element(ProductCategory)) - ResponseElement.__init__(self, *args, **kw) + super(ProductCategory, self).__init__(*args, **kw) class GetProductCategoriesResult(ResponseElement): - Self = Element(ProductCategory) + Self = ElementList(ProductCategory) class GetProductCategoriesForSKUResult(GetProductCategoriesResult): @@ -636,10 +642,6 @@ Orders = Element(Order=ElementList(Order)) -class ListOrdersByNextTokenResult(ListOrdersResult): - pass - - class GetOrderResult(ListOrdersResult): pass @@ -667,5 +669,118 @@ ListMarketplaces = Element(Marketplace=ElementList()) -class ListMarketplaceParticipationsByNextTokenResult(ListMarketplaceParticipationsResult): +class ListRecommendationsResult(ResponseElement): + ListingQualityRecommendations = MemberList(ItemIdentifier=Element()) + + +class Customer(ResponseElement): + PrimaryContactInfo = Element() + ShippingAddressList = Element(ShippingAddress=ElementList()) + AssociatedMarketplaces = Element(MarketplaceDomain=ElementList()) + + +class ListCustomersResult(ResponseElement): + CustomerList = Element(Customer=ElementList(Customer)) + + +class GetCustomersForCustomerIdResult(ListCustomersResult): + pass + + +class CartItem(ResponseElement): + CurrentPrice = Element(ComplexMoney) + SalePrice = Element(ComplexMoney) + + +class Cart(ResponseElement): + ActiveCartItemList = Element(CartItem=ElementList(CartItem)) + SavedCartItemList = Element(CartItem=ElementList(CartItem)) + + +class ListCartsResult(ResponseElement): + CartList = Element(Cart=ElementList(Cart)) + + +class GetCartsResult(ListCartsResult): + pass + + +class Destination(ResponseElement): + AttributeList = MemberList() + + +class ListRegisteredDestinationsResult(ResponseElement): + DestinationList = MemberList(Destination) + + 
+class Subscription(ResponseElement): + Destination = Element(Destination) + + +class GetSubscriptionResult(ResponseElement): + Subscription = Element(Subscription) + + +class ListSubscriptionsResult(ResponseElement): + SubscriptionList = MemberList(Subscription) + + +class OrderReferenceDetails(ResponseElement): + Buyer = Element() + OrderTotal = Element(ComplexMoney) + Destination = Element(PhysicalDestination=Element()) + SellerOrderAttributes = Element() + OrderReferenceStatus = Element() + Constraints = ElementList() + + +class SetOrderReferenceDetailsResult(ResponseElement): + OrderReferenceDetails = Element(OrderReferenceDetails) + + +class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult): + pass + + +class AuthorizationDetails(ResponseElement): + AuthorizationAmount = Element(ComplexMoney) + CapturedAmount = Element(ComplexMoney) + AuthorizationFee = Element(ComplexMoney) + AuthorizationStatus = Element() + + +class AuthorizeResult(ResponseElement): + AuthorizationDetails = Element(AuthorizationDetails) + + +class GetAuthorizationDetailsResult(AuthorizeResult): + pass + + +class CaptureDetails(ResponseElement): + CaptureAmount = Element(ComplexMoney) + RefundedAmount = Element(ComplexMoney) + CaptureFee = Element(ComplexMoney) + CaptureStatus = Element() + + +class CaptureResult(ResponseElement): + CaptureDetails = Element(CaptureDetails) + + +class GetCaptureDetailsResult(CaptureResult): + pass + + +class RefundDetails(ResponseElement): + RefundAmount = Element(ComplexMoney) + FeeRefunded = Element(ComplexMoney) + RefundStatus = Element() + + +class RefundResult(ResponseElement): + RefundDetails = Element(RefundDetails) + + +class GetRefundDetails(RefundResult): pass diff -Nru python-boto-2.20.1/boto/opsworks/__init__.py python-boto-2.29.1/boto/opsworks/__init__.py --- python-boto-2.20.1/boto/opsworks/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/opsworks/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 
+1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon OpsWorks service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.opsworks.layer1 import OpsWorksConnection + return get_regions('opsworks', connection_cls=OpsWorksConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.20.1/boto/opsworks/layer1.py python-boto-2.29.1/boto/opsworks/layer1.py --- python-boto-2.20.1/boto/opsworks/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/opsworks/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,11 @@ # IN THE SOFTWARE. 
# -import json +try: + import json +except ImportError: + import simplejson as json + import boto from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo @@ -41,6 +45,23 @@ lifecycle. For information about this product, go to the `AWS OpsWorks`_ details page. + **SDKs and CLI** + + The most common way to use the AWS OpsWorks API is by using the + AWS Command Line Interface (CLI) or by using one of the AWS SDKs + to implement applications in your preferred language. For more + information, see: + + + + `AWS CLI`_ + + `AWS SDK for Java`_ + + `AWS SDK for .NET`_ + + `AWS SDK for PHP 2`_ + + `AWS SDK for Ruby`_ + + `AWS SDK for Node.js`_ + + `AWS SDK for Python(Boto)`_ + + **Endpoints** AWS OpsWorks supports only one endpoint, opsworks.us- @@ -53,7 +74,8 @@ When you call CreateStack, CloneStack, or UpdateStack we recommend you use the `ConfigurationManager` parameter to specify the Chef version, 0.9 or 11.4. The default value is currently 0.9. However, - we expect to change the default value to 11.4 in September 2013. + we expect to change the default value to 11.4 in October 2013. For + more information, see `Using AWS OpsWorks with Chef 11`_. """ APIVersion = "2013-02-18" DefaultRegionName = "us-east-1" @@ -69,12 +91,12 @@ def __init__(self, **kwargs): - region = kwargs.get('region') + region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(OpsWorksConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): @@ -85,7 +107,13 @@ Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. For more information, see - ``_. + `Resource Management`_. 
+ + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type volume_id: string :param volume_id: The volume ID. @@ -105,7 +133,13 @@ Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For - more information, see ``_. + more information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type elastic_ip: string :param elastic_ip: The Elastic IP address. @@ -131,6 +165,12 @@ or CLI. For more information, see ` Elastic Load Balancing Developer Guide`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type elastic_load_balancer_name: string :param elastic_load_balancer_name: The Elastic Load Balancing instance's name. @@ -160,6 +200,11 @@ Creates a clone of a specified stack. For more information, see `Clone a Stack`_. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type source_stack_id: string :param source_stack_id: The source stack ID. @@ -233,20 +278,20 @@ :param hostname_theme: The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. 
By default, `HostnameTheme` is set to - Layer_Dependent, which creates host names by appending integers to - the layer's short name. The other themes are: + `Layer_Dependent`, which creates host names by appending integers + to the layer's short name. The other themes are: - + Baked_Goods - + Clouds - + European_Cities - + Fruits - + Greek_Deities - + Legendary_Creatures_from_Japan - + Planets_and_Moons - + Roman_Deities - + Scottish_Islands - + US_Cities - + Wild_Cats + + `Baked_Goods` + + `Clouds` + + `European_Cities` + + `Fruits` + + `Greek_Deities` + + `Legendary_Creatures_from_Japan` + + `Planets_and_Moons` + + `Roman_Deities` + + `Scottish_Islands` + + `US_Cities` + + `Wild_Cats` To obtain a generated host name, call `GetHostNameSuggestion`, which @@ -359,6 +404,12 @@ Creates an app for a specified stack. For more information, see `Creating Apps`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -430,6 +481,12 @@ For more information, see `Deploying Apps`_ and `Run Stack Commands`_. + **Required Permissions**: To use this action, an IAM user must + have a Deploy or Manage permissions level for the stack, or an + attached policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -479,6 +536,12 @@ Creates an instance in a specified stack. For more information, see `Adding an Instance to a Layer`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. 
+ :type stack_id: string :param stack_id: The stack ID. @@ -614,6 +677,12 @@ number of custom layers, so you can call **CreateLayer** as many times as you like for that layer type. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The layer stack ID. @@ -736,6 +805,11 @@ Creates a new stack. For more information, see `Create a New Stack`_. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type name: string :param name: The stack name. @@ -798,20 +872,20 @@ :param hostname_theme: The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, `HostnameTheme` is set to - Layer_Dependent, which creates host names by appending integers to - the layer's short name. The other themes are: + `Layer_Dependent`, which creates host names by appending integers + to the layer's short name. 
The other themes are: - + Baked_Goods - + Clouds - + European_Cities - + Fruits - + Greek_Deities - + Legendary_Creatures_from_Japan - + Planets_and_Moons - + Roman_Deities - + Scottish_Islands - + US_Cities - + Wild_Cats + + `Baked_Goods` + + `Clouds` + + `European_Cities` + + `Fruits` + + `Greek_Deities` + + `Legendary_Creatures_from_Japan` + + `Planets_and_Moons` + + `Roman_Deities` + + `Scottish_Islands` + + `US_Cities` + + `Wild_Cats` To obtain a generated host name, call `GetHostNameSuggestion`, which @@ -902,10 +976,15 @@ body=json.dumps(params)) def create_user_profile(self, iam_user_arn, ssh_username=None, - ssh_public_key=None): + ssh_public_key=None, allow_self_management=None): """ Creates a new user profile. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type iam_user_arn: string :param iam_user_arn: The user's IAM ARN. @@ -915,12 +994,19 @@ :type ssh_public_key: string :param ssh_public_key: The user's public SSH key. + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + ``_. + """ params = {'IamUserArn': iam_user_arn, } if ssh_username is not None: params['SshUsername'] = ssh_username if ssh_public_key is not None: params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management return self.make_request(action='CreateUserProfile', body=json.dumps(params)) @@ -928,6 +1014,12 @@ """ Deletes a specified app. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. 
+ :type app_id: string :param app_id: The app ID. @@ -943,6 +1035,12 @@ you can delete it. For more information, see `Deleting Instances`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -969,6 +1067,12 @@ all associated instances. For more information, see `How to Delete a Layer`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type layer_id: string :param layer_id: The layer ID. @@ -983,6 +1087,12 @@ instances, layers, and apps. For more information, see `Shut Down a Stack`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -995,6 +1105,11 @@ """ Deletes a user profile. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type iam_user_arn: string :param iam_user_arn: The user's IAM ARN. @@ -1007,7 +1122,13 @@ """ Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see - ``_. + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. 
For more + information on user permissions, see `Managing User + Permissions`_. :type elastic_ip: string :param elastic_ip: The Elastic IP address. @@ -1020,7 +1141,14 @@ def deregister_volume(self, volume_id): """ Deregisters an Amazon EBS volume. The volume can then be - registered by another stack. For more information, see ``_. + registered by another stack. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type volume_id: string :param volume_id: The volume ID. @@ -1036,6 +1164,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: The app stack ID. If you use this parameter, `DescribeApps` returns a description of the apps in the specified @@ -1062,6 +1196,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type deployment_id: string :param deployment_id: The deployment ID. If you include this parameter, `DescribeCommands` returns a description of the commands associated @@ -1096,6 +1236,12 @@ You must specify at least one of the parameters. 
+ **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: The stack ID. If you include this parameter, `DescribeDeployments` returns a description of the commands @@ -1129,6 +1275,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type instance_id: string :param instance_id: The instance ID. If you include this parameter, `DescribeElasticIps` returns a description of the Elastic IP @@ -1162,6 +1314,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: A stack ID. The action describes the stack's Elastic Load Balancing instances. @@ -1186,6 +1344,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: A stack ID. If you use this parameter, `DescribeInstances` returns descriptions of the instances @@ -1220,6 +1384,12 @@ You must specify at least one of the parameters. 
+ **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1244,6 +1414,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type layer_ids: list :param layer_ids: An array of layer IDs. @@ -1252,10 +1428,31 @@ return self.make_request(action='DescribeLoadBasedAutoScaling', body=json.dumps(params)) - def describe_permissions(self, iam_user_arn, stack_id): + def describe_my_user_profile(self): + """ + Describes a user's SSH information. + + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + + """ + params = {} + return self.make_request(action='DescribeMyUserProfile', + body=json.dumps(params)) + + def describe_permissions(self, iam_user_arn=None, stack_id=None): """ Describes the permissions for a specified stack. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type iam_user_arn: string :param iam_user_arn: The user's IAM ARN. For more information about IAM ARNs, see `Using Identifiers`_. @@ -1264,7 +1461,11 @@ :param stack_id: The stack ID. 
""" - params = {'IamUserArn': iam_user_arn, 'StackId': stack_id, } + params = {} + if iam_user_arn is not None: + params['IamUserArn'] = iam_user_arn + if stack_id is not None: + params['StackId'] = stack_id return self.make_request(action='DescribePermissions', body=json.dumps(params)) @@ -1274,6 +1475,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type instance_id: string :param instance_id: The instance ID. If you use this parameter, `DescribeRaidArrays` returns descriptions of the RAID arrays @@ -1299,6 +1506,12 @@ """ Describes AWS OpsWorks service errors. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_id: string :param stack_id: The stack ID. If you use this parameter, `DescribeServiceErrors` returns descriptions of the errors @@ -1326,10 +1539,36 @@ return self.make_request(action='DescribeServiceErrors', body=json.dumps(params)) + def describe_stack_summary(self, stack_id): + """ + Describes the number of layers and apps in a specified stack, + and the number of instances in each state, such as + `running_setup` or `online`. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. 
+ + """ + params = {'StackId': stack_id, } + return self.make_request(action='DescribeStackSummary', + body=json.dumps(params)) + def describe_stacks(self, stack_ids=None): """ Requests a description of one or more stacks. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type stack_ids: list :param stack_ids: An array of stack IDs that specify the stacks to be described. If you omit this parameter, `DescribeStacks` returns a @@ -1349,6 +1588,12 @@ You must specify at least one of the parameters. + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type instance_ids: list :param instance_ids: An array of instance IDs. @@ -1357,16 +1602,23 @@ return self.make_request(action='DescribeTimeBasedAutoScaling', body=json.dumps(params)) - def describe_user_profiles(self, iam_user_arns): + def describe_user_profiles(self, iam_user_arns=None): """ Describe specified users. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type iam_user_arns: list :param iam_user_arns: An array of IAM user ARNs that identify the users to be described. """ - params = {'IamUserArns': iam_user_arns, } + params = {} + if iam_user_arns is not None: + params['IamUserArns'] = iam_user_arns return self.make_request(action='DescribeUserProfiles', body=json.dumps(params)) @@ -1377,6 +1629,12 @@ You must specify at least one of the parameters. 
+ **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + :type instance_id: string :param instance_id: The instance ID. If you use this parameter, `DescribeVolumes` returns descriptions of the volumes associated @@ -1415,6 +1673,12 @@ Detaches a specified Elastic Load Balancing instance from its layer. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type elastic_load_balancer_name: string :param elastic_load_balancer_name: The Elastic Load Balancing instance's name. @@ -1435,7 +1699,13 @@ """ Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more - information, see ``_. + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type elastic_ip: string :param elastic_ip: The Elastic IP address. @@ -1450,6 +1720,12 @@ Gets a generated host name for the specified layer, based on the current host name theme. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type layer_id: string :param layer_id: The layer ID. @@ -1463,6 +1739,12 @@ Reboots a specified instance. For more information, see `Starting, Stopping, and Rebooting Instances`_. 
+ **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -1477,7 +1759,13 @@ address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see - ``_. + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type elastic_ip: string :param elastic_ip: The Elastic IP address. @@ -1495,7 +1783,14 @@ Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by - calling DeregisterVolume. For more information, see ``_. + calling DeregisterVolume. For more information, see `Resource + Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type ec_2_volume_id: string :param ec_2_volume_id: The Amazon EBS volume ID. @@ -1523,6 +1818,12 @@ you have created enough instances to handle the maximum anticipated load. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type layer_id: string :param layer_id: The layer ID. 
@@ -1553,11 +1854,17 @@ body=json.dumps(params)) def set_permission(self, stack_id, iam_user_arn, allow_ssh=None, - allow_sudo=None): + allow_sudo=None, level=None): """ Specifies a stack's permissions. For more information, see `Security and Permissions`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1572,12 +1879,28 @@ :param allow_sudo: The user is allowed to use **sudo** to elevate privileges. + :type level: string + :param level: The user's permission level, which must be set to one of + the following strings. You cannot set your own permissions level. + + + `deny` + + `show` + + `deploy` + + `manage` + + `iam_only` + + + For more information on the permissions associated with these levels, + see `Managing User Permissions`_ + """ params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, } if allow_ssh is not None: params['AllowSsh'] = allow_ssh if allow_sudo is not None: params['AllowSudo'] = allow_sudo + if level is not None: + params['Level'] = level return self.make_request(action='SetPermission', body=json.dumps(params)) @@ -1588,6 +1911,12 @@ specified instance. For more information, see `Managing Load with Time-based and Load-based Instances`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -1607,6 +1936,12 @@ Starts a specified instance. For more information, see `Starting, Stopping, and Rebooting Instances`_. 
+ **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -1619,6 +1954,12 @@ """ Starts stack's instances. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1635,6 +1976,12 @@ without losing data. For more information, see `Starting, Stopping, and Rebooting Instances`_. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -1647,6 +1994,12 @@ """ Stops a specified stack. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1658,7 +2011,14 @@ def unassign_volume(self, volume_id): """ Unassigns an assigned Amazon EBS volume. The volume remains - registered with the stack. For more information, see ``_. + registered with the stack. For more information, see `Resource + Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. 
For more + information on user permissions, see `Managing User + Permissions`_. :type volume_id: string :param volume_id: The volume ID. @@ -1674,6 +2034,12 @@ """ Updates a specified app. + **Required Permissions**: To use this action, an IAM user must + have a Deploy or Manage permissions level for the stack, or an + attached policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type app_id: string :param app_id: The app ID. @@ -1728,7 +2094,13 @@ def update_elastic_ip(self, elastic_ip, name=None): """ Updates a registered Elastic IP address's name. For more - information, see ``_. + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type elastic_ip: string :param elastic_ip: The address. @@ -1751,6 +2123,12 @@ """ Updates a specified instance. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type instance_id: string :param instance_id: The instance ID. @@ -1854,6 +2232,12 @@ """ Updates a specified layer. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type layer_id: string :param layer_id: The layer ID. @@ -1947,6 +2331,25 @@ return self.make_request(action='UpdateLayer', body=json.dumps(params)) + def update_my_user_profile(self, ssh_public_key=None): + """ + Updates a user's SSH public key. 
+ + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + :type ssh_public_key: string + :param ssh_public_key: The user's SSH public key. + + """ + params = {} + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + return self.make_request(action='UpdateMyUserProfile', + body=json.dumps(params)) + def update_stack(self, stack_id, name=None, attributes=None, service_role_arn=None, default_instance_profile_arn=None, default_os=None, @@ -1958,6 +2361,12 @@ """ Updates a specified stack. + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + :type stack_id: string :param stack_id: The stack ID. @@ -1995,20 +2404,20 @@ :param hostname_theme: The stack's new host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, `HostnameTheme` is set to - Layer_Dependent, which creates host names by appending integers to - the layer's short name. The other themes are: + `Layer_Dependent`, which creates host names by appending integers + to the layer's short name. 
The other themes are: - + Baked_Goods - + Clouds - + European_Cities - + Fruits - + Greek_Deities - + Legendary_Creatures_from_Japan - + Planets_and_Moons - + Roman_Deities - + Scottish_Islands - + US_Cities - + Wild_Cats + + `Baked_Goods` + + `Clouds` + + `European_Cities` + + `Fruits` + + `Greek_Deities` + + `Legendary_Creatures_from_Japan` + + `Planets_and_Moons` + + `Roman_Deities` + + `Scottish_Islands` + + `US_Cities` + + `Wild_Cats` To obtain a generated host name, call `GetHostNameSuggestion`, which @@ -2096,10 +2505,15 @@ body=json.dumps(params)) def update_user_profile(self, iam_user_arn, ssh_username=None, - ssh_public_key=None): + ssh_public_key=None, allow_self_management=None): """ Updates a specified user profile. + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + :type iam_user_arn: string :param iam_user_arn: The user IAM ARN. @@ -2109,19 +2523,32 @@ :type ssh_public_key: string :param ssh_public_key: The user's new SSH public key. + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + `Managing User Permissions`_. + """ params = {'IamUserArn': iam_user_arn, } if ssh_username is not None: params['SshUsername'] = ssh_username if ssh_public_key is not None: params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management return self.make_request(action='UpdateUserProfile', body=json.dumps(params)) def update_volume(self, volume_id, name=None, mount_point=None): """ Updates an Amazon EBS volume's name or mount point. For more - information, see ``_. + information, see `Resource Management`_. 
+ + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. :type volume_id: string :param volume_id: The volume ID. diff -Nru python-boto-2.20.1/boto/provider.py python-boto-2.29.1/boto/provider.py --- python-boto-2.20.1/boto/provider.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/provider.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,6 +31,8 @@ import boto from boto import config +from boto.compat import expanduser +from boto.pyami.config import Config from boto.gs.acl import ACL from boto.gs.acl import CannedACLStrings as CannedGSACLStrings from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings @@ -57,6 +59,7 @@ MFA_HEADER_KEY = 'mfa-header' SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header' VERSION_ID_HEADER_KEY = 'version-id-header' +RESTORE_HEADER_KEY = 'restore-header' STORAGE_COPY_ERROR = 'StorageCopyError' STORAGE_CREATE_ERROR = 'StorageCreateError' @@ -65,11 +68,16 @@ STORAGE_RESPONSE_ERROR = 'StorageResponseError' +class ProfileNotFoundError(ValueError): pass + + class Provider(object): CredentialMap = { - 'aws': ('aws_access_key_id', 'aws_secret_access_key'), - 'google': ('gs_access_key_id', 'gs_secret_access_key'), + 'aws': ('aws_access_key_id', 'aws_secret_access_key', + 'aws_security_token', 'aws_profile'), + 'google': ('gs_access_key_id', 'gs_secret_access_key', + None, None), } AclClassMap = { @@ -122,6 +130,7 @@ VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id', STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class', MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa', + RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore', }, 'google': { HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX, @@ -144,6 +153,7 @@ VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id', STORAGE_CLASS_HEADER_KEY: None, MFA_HEADER_KEY: None, + 
RESTORE_HEADER_KEY: None, } } @@ -165,20 +175,29 @@ } def __init__(self, name, access_key=None, secret_key=None, - security_token=None): + security_token=None, profile_name=None): self.host = None self.port = None self.host_header = None self.access_key = access_key self.secret_key = secret_key self.security_token = security_token + self.profile_name = profile_name self.name = name self.acl_class = self.AclClassMap[self.name] self.canned_acls = self.CannedAclsMap[self.name] self._credential_expiry_time = None - self.get_credentials(access_key, secret_key) + + # Load shared credentials file if it exists + shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials') + self.shared_credentials = Config(do_load=False) + if os.path.exists(shared_path): + self.shared_credentials.load_from_path(shared_path) + + self.get_credentials(access_key, secret_key, security_token, profile_name) self.configure_headers() self.configure_errors() + # Allow config file to override default host and port. 
host_opt_name = '%s_host' % self.HostKeyMap[self.name] if config.has_option('Credentials', host_opt_name): @@ -239,14 +258,42 @@ else: return False - def get_credentials(self, access_key=None, secret_key=None): - access_key_name, secret_key_name = self.CredentialMap[self.name] + def get_credentials(self, access_key=None, secret_key=None, + security_token=None, profile_name=None): + access_key_name, secret_key_name, security_token_name, \ + profile_name_name = self.CredentialMap[self.name] + + # Load profile from shared environment variable if it was not + # already passed in and the environment variable exists + if profile_name is None and profile_name_name is not None and \ + profile_name_name.upper() in os.environ: + profile_name = os.environ[profile_name_name.upper()] + + shared = self.shared_credentials + if access_key is not None: self.access_key = access_key boto.log.debug("Using access key provided by client.") elif access_key_name.upper() in os.environ: self.access_key = os.environ[access_key_name.upper()] boto.log.debug("Using access key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, access_key_name): + self.access_key = shared.get(profile_name, access_key_name) + boto.log.debug("Using access key found in shared credential " + "file for profile %s." % profile_name) + elif config.has_option("profile %s" % profile_name, + access_key_name): + self.access_key = config.get("profile %s" % profile_name, + access_key_name) + boto.log.debug("Using access key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' 
% + profile_name) + elif shared.has_option('default', access_key_name): + self.access_key = shared.get('default', access_key_name) + boto.log.debug("Using access key found in shared credential file.") elif config.has_option('Credentials', access_key_name): self.access_key = config.get('Credentials', access_key_name) boto.log.debug("Using access key found in config file.") @@ -257,6 +304,22 @@ elif secret_key_name.upper() in os.environ: self.secret_key = os.environ[secret_key_name.upper()] boto.log.debug("Using secret key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, secret_key_name): + self.secret_key = shared.get(profile_name, secret_key_name) + boto.log.debug("Using secret key found in shared credential " + "file for profile %s." % profile_name) + elif config.has_option("profile %s" % profile_name, secret_key_name): + self.secret_key = config.get("profile %s" % profile_name, + secret_key_name) + boto.log.debug("Using secret key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' % + profile_name) + elif shared.has_option('default', secret_key_name): + self.secret_key = shared.get('default', secret_key_name) + boto.log.debug("Using secret key found in shared credential file.") elif config.has_option('Credentials', secret_key_name): self.secret_key = config.get('Credentials', secret_key_name) boto.log.debug("Using secret key found in config file.") @@ -273,6 +336,30 @@ keyring_name, self.access_key) boto.log.debug("Using secret key found in keyring.") + if security_token is not None: + self.security_token = security_token + boto.log.debug("Using security token provided by client.") + elif ((security_token_name is not None) and + (access_key is None) and (secret_key is None)): + # Only provide a token from the environment/config if the + # caller did not specify a key and secret. 
Otherwise an + # environment/config token could be paired with a + # different set of credentials provided by the caller + if security_token_name.upper() in os.environ: + self.security_token = os.environ[security_token_name.upper()] + boto.log.debug("Using security token found in environment" + " variable.") + elif shared.has_option(profile_name or 'default', + security_token_name): + self.security_token = shared.get(profile_name or 'default', + security_token_name) + boto.log.debug("Using security token found in shared " + "credential file.") + elif config.has_option('Credentials', security_token_name): + self.security_token = config.get('Credentials', + security_token_name) + boto.log.debug("Using security token found in config file.") + if ((self._access_key is None or self._secret_key is None) and self.MetadataServiceSupport[self.name]): self._populate_keys_from_metadata_server() @@ -332,6 +419,7 @@ self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY] self.version_id = header_info_map[VERSION_ID_HEADER_KEY] self.mfa_header = header_info_map[MFA_HEADER_KEY] + self.restore_header = header_info_map[RESTORE_HEADER_KEY] def configure_errors(self): error_map = self.ErrorMap[self.name] diff -Nru python-boto-2.20.1/boto/pyami/bootstrap.py python-boto-2.29.1/boto/pyami/bootstrap.py --- python-boto-2.20.1/boto/pyami/bootstrap.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/pyami/bootstrap.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -40,7 +40,7 @@ def __init__(self): self.working_dir = '/mnt/pyami' self.write_metadata() - ScriptBase.__init__(self) + super(Bootstrap, self).__init__() def write_metadata(self): fp = open(os.path.expanduser(BotoConfigPath), 'w') diff -Nru python-boto-2.20.1/boto/pyami/config.py python-boto-2.29.1/boto/pyami/config.py --- python-boto-2.20.1/boto/pyami/config.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/pyami/config.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,20 +20,15 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -import StringIO, os, re -import warnings import ConfigParser +import os +import re +import StringIO +import warnings + import boto +from boto.compat import expanduser -# If running in Google App Engine there is no "user" and -# os.path.expanduser() will fail. Attempt to detect this case and use a -# no-op expanduser function in this case. -try: - os.path.expanduser('~') - expanduser = os.path.expanduser -except (AttributeError, ImportError): - # This is probably running on App Engine. - expanduser = (lambda x: x) # By default we use two locations for the boto configurations, # /etc/boto.cfg and ~/.boto (which works on Windows and Unix). 
@@ -42,7 +37,7 @@ UserConfigPath = os.path.join(expanduser('~'), '.boto') BotoConfigLocations.append(UserConfigPath) -# If there's a BOTO_CONFIG variable set, we load ONLY +# If there's a BOTO_CONFIG variable set, we load ONLY # that variable if 'BOTO_CONFIG' in os.environ: BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])] @@ -58,6 +53,8 @@ class Config(ConfigParser.SafeConfigParser): def __init__(self, path=None, fp=None, do_load=True): + # We don't use ``super`` here, because ``ConfigParser`` still uses + # old-style classes. ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'}) if do_load: @@ -147,14 +144,14 @@ except: val = default return val - + def getint(self, section, name, default=0): try: val = ConfigParser.SafeConfigParser.getint(self, section, name) except: val = int(default) return val - + def getfloat(self, section, name, default=0.0): try: val = ConfigParser.SafeConfigParser.getfloat(self, section, name) @@ -172,13 +169,13 @@ else: val = default return val - + def setbool(self, section, name, value): if value: self.set(section, name, 'true') else: self.set(section, name, 'false') - + def dump(self): s = StringIO.StringIO() self.write(s) @@ -194,7 +191,7 @@ fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) else: fp.write('%s = %s\n' % (option, self.get(section, option))) - + def dump_to_sdb(self, domain_name, item_name): from boto.compat import json sdb = boto.connect_sdb() @@ -221,7 +218,7 @@ d = json.loads(item[section]) for attr_name in d.keys(): attr_value = d[attr_name] - if attr_value == None: + if attr_value is None: attr_value = 'None' if isinstance(attr_value, bool): self.setbool(section, attr_name, attr_value) diff -Nru python-boto-2.20.1/boto/pyami/copybot.py python-boto-2.29.1/boto/pyami/copybot.py --- python-boto-2.20.1/boto/pyami/copybot.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/pyami/copybot.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS 
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -26,7 +26,7 @@ class CopyBot(ScriptBase): def __init__(self): - ScriptBase.__init__(self) + super(CopyBot, self).__init__() self.wdir = boto.config.get('Pyami', 'working_dir') self.log_file = '%s.log' % self.instance_id self.log_path = os.path.join(self.wdir, self.log_file) @@ -80,7 +80,7 @@ def copy_log(self): key = self.dst.new_key(self.log_file) key.set_contents_from_filename(self.log_path) - + def main(self): fp = StringIO.StringIO() boto.config.dump_safe(fp) @@ -94,4 +94,4 @@ if boto.config.getbool(self.name, 'exit_on_completion', True): ec2 = boto.connect_ec2() ec2.terminate_instances([self.instance_id]) - + diff -Nru python-boto-2.20.1/boto/pyami/installers/ubuntu/ebs.py python-boto-2.29.1/boto/pyami/installers/ubuntu/ebs.py --- python-boto-2.20.1/boto/pyami/installers/ubuntu/ebs.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/pyami/installers/ubuntu/ebs.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -36,7 +36,7 @@ [EBS] volume_id = - logical_volume_name = device = @@ -86,7 +86,7 @@ for v in Volume.all(): v.trim_snapshots(True) """ - + TagBasedBackupCleanupScript= """#!/usr/bin/env python import boto @@ -102,7 +102,7 @@ """ def __init__(self, config_file=None): - Installer.__init__(self, config_file) + super(EBSInstaller, self).__init__(config_file) self.instance_id = boto.config.get('Instance', 'instance-id') self.device = boto.config.get('EBS', 'device', '/dev/sdp') self.volume_id = boto.config.get('EBS', 'volume_id') @@ -130,7 +130,7 @@ attempt_attach = False except EC2ResponseError, e: if e.error_code != 'IncorrectState': - # if there's an EC2ResonseError with the code set to IncorrectState, delay a bit for ec2 + # if there's an EC2ResonseError with the code set to IncorrectState, delay a bit for ec2 # to realize the instance is running, then try again. Otherwise, raise the error: boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.' % (self.volume_id, self.instance_id, e.errors)) time.sleep(2) @@ -198,7 +198,7 @@ def install(self): # First, find and attach the volume self.attach() - + # Install the xfs tools self.run('apt-get -y install xfsprogs xfsdump') @@ -219,9 +219,9 @@ # Set up the backup cleanup script minute = boto.config.get('EBS', 'backup_cleanup_cron_minute') hour = boto.config.get('EBS', 'backup_cleanup_cron_hour') - if (minute != None) and (hour != None): + if (minute is not None) and (hour is not None): # Snapshot clean up can either be done via the manage module, or via the new tag based - # snapshot code, if the snapshots have been tagged with the name of the associated + # snapshot code, if the snapshots have been tagged with the name of the associated # volume. 
Check for the presence of the new configuration flag, and use the appropriate # cleanup method / script: use_tag_based_cleanup = boto.config.has_option('EBS', 'use_tag_based_snapshot_cleanup') diff -Nru python-boto-2.20.1/boto/pyami/scriptbase.py python-boto-2.29.1/boto/pyami/scriptbase.py --- python-boto-2.20.1/boto/pyami/scriptbase.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/pyami/scriptbase.py 2014-05-30 20:49:34.000000000 +0000 @@ -4,7 +4,7 @@ import boto import boto.utils -class ScriptBase: +class ScriptBase(object): def __init__(self, config_file=None): self.instance_id = boto.config.get('Instance', 'instance-id', 'default') @@ -41,4 +41,4 @@ def main(self): pass - + diff -Nru python-boto-2.20.1/boto/rds/dbsubnetgroup.py python-boto-2.29.1/boto/rds/dbsubnetgroup.py --- python-boto-2.20.1/boto/rds/dbsubnetgroup.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds/dbsubnetgroup.py 2014-05-30 20:49:34.000000000 +0000 @@ -40,7 +40,7 @@ self.connection = connection self.name = name self.description = description - if subnet_ids != None: + if subnet_ids is not None: self.subnet_ids = subnet_ids else: self.subnet_ids = [] diff -Nru python-boto-2.20.1/boto/rds/__init__.py python-boto-2.29.1/boto/rds/__init__.py --- python-boto-2.20.1/boto/rds/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,6 +31,9 @@ from boto.rds.regioninfo import RDSRegionInfo from boto.rds.dbsubnetgroup import DBSubnetGroup from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.regioninfo import get_regions +from boto.rds.logfile import LogFile, LogFileObject + def regions(): """ @@ -39,25 +42,11 @@ :rtype: list :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo` """ - return [RDSRegionInfo(name='us-east-1', - endpoint='rds.amazonaws.com'), - RDSRegionInfo(name='us-gov-west-1', - endpoint='rds.us-gov-west-1.amazonaws.com'), - 
RDSRegionInfo(name='eu-west-1', - endpoint='rds.eu-west-1.amazonaws.com'), - RDSRegionInfo(name='us-west-1', - endpoint='rds.us-west-1.amazonaws.com'), - RDSRegionInfo(name='us-west-2', - endpoint='rds.us-west-2.amazonaws.com'), - RDSRegionInfo(name='sa-east-1', - endpoint='rds.sa-east-1.amazonaws.com'), - RDSRegionInfo(name='ap-northeast-1', - endpoint='rds.ap-northeast-1.amazonaws.com'), - RDSRegionInfo(name='ap-southeast-1', - endpoint='rds.ap-southeast-1.amazonaws.com'), - RDSRegionInfo(name='ap-southeast-2', - endpoint='rds.ap-southeast-2.amazonaws.com'), - ] + return get_regions( + 'rds', + region_cls=RDSRegionInfo, + connection_cls=RDSConnection + ) def connect_to_region(region_name, **kw_params): @@ -92,19 +81,21 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, + profile_name=None): if not region: region = RDSRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(RDSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -169,7 +160,7 @@ iops=None, vpc_security_groups=None, ): - # API version: 2012-09-17 + # API version: 2013-09-09 # Parameter notes: # ================= # id should be db_instance_identifier according to API docs but has been left @@ -196,20 +187,23 @@ :param allocated_storage: Initially allocated storage size, in GBs. Valid values are depending on the engine value. 
- * MySQL = 5--1024 - * oracle-se1 = 10--1024 - * oracle-se = 10--1024 - * oracle-ee = 10--1024 + * MySQL = 5--3072 + * oracle-se1 = 10--3072 + * oracle-se = 10--3072 + * oracle-ee = 10--3072 * sqlserver-ee = 200--1024 * sqlserver-se = 200--1024 * sqlserver-ex = 30--1024 * sqlserver-web = 30--1024 + * postgres = 5--3072 :type instance_class: str :param instance_class: The compute and memory capacity of the DBInstance. Valid values are: + * db.t1.micro * db.m1.small + * db.m1.medium * db.m1.large * db.m1.xlarge * db.m2.xlarge @@ -227,6 +221,7 @@ * sqlserver-se * sqlserver-ex * sqlserver-web + * postgres :type master_username: str :param master_username: Name of master user for the DBInstance. @@ -263,7 +258,10 @@ * Oracle defaults to 1521 - * SQL Server defaults to 1433 and _cannot_ be 1434 or 3389 + * SQL Server defaults to 1433 and _cannot_ be 1434, 3389, + 47001, 49152, and 49152 through 49156. + + * PostgreSQL defaults to 5432 :type db_name: str :param db_name: * MySQL: @@ -280,6 +278,15 @@ * SQL Server: Not applicable and must be None. + * PostgreSQL: + Name of a database to create when the DBInstance + is created. Default is to create no databases. + + Must contain 1--63 alphanumeric characters. Must + begin with a letter or an underscore. Subsequent + characters can be letters, underscores, or digits (0-9) + and cannot be a reserved PostgreSQL word. + :type param_group: str or ParameterGroup object :param param_group: Name of DBParameterGroup or ParameterGroup instance to associate with this DBInstance. 
If no groups are @@ -326,6 +333,8 @@ * SQL Server format example: 10.50.2789.0.v1 + * PostgreSQL format example: 9.3 + :type auto_minor_version_upgrade: bool :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied @@ -1067,6 +1076,91 @@ return self.get_list('DescribeDBSnapshots', params, [('DBSnapshot', DBSnapshot)]) + def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None): + """ + Get all log files + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. + + :type max_records: int + :param max_records: Number of log file names to return. + + :type marker: str + :param marker: The marker provided by a previous request. + + :file_size: int + :param file_size: Filter results to files large than this size in bytes. + + :filename_contains: str + :param filename_contains: Filter results to files with filename containing this string + + :file_last_written: int + :param file_last_written: Filter results to files written after this time (POSIX timestamp) + + :rtype: list + :return: A list of :class:`boto.rds.logfile.LogFile` + """ + params = {'DBInstanceIdentifier': dbinstance_id} + + if file_size: + params['FileSize'] = file_size + + if filename_contains: + params['FilenameContains'] = filename_contains + + if file_last_written: + params['FileLastWritten'] = file_last_written + + if marker: + params['Marker'] = marker + + if max_records: + params['MaxRecords'] = max_records + + return self.get_list('DescribeDBLogFiles', params, + [('DescribeDBLogFilesDetails',LogFile)]) + + def get_log_file(self, dbinstance_id, log_file_name, marker=None, number_of_lines=None, max_records=None): + """ + Download a log file from RDS + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. 
+ + :type log_file_name: str + :param log_file_name: The name of the log file to retrieve + + :type marker: str + :param marker: A marker returned from a previous call to this method, or 0 to indicate the start of file. If + no marker is specified, this will fetch log lines from the end of file instead. + + :type number_of_lines: int + :param number_of_lines: The maximum number of lines to be returned. + """ + + params = { + 'DBInstanceIdentifier': dbinstance_id, + 'LogFileName': log_file_name, + } + + if marker: + params['Marker'] = marker + + if number_of_lines: + params['NumberOfLines'] = number_of_lines + + if max_records: + params['MaxRecords'] = max_records + + logfile = self.get_object('DownloadDBLogFilePortion', params, LogFileObject) + + if logfile: + logfile.log_filename = log_file_name + logfile.dbinstance_id = dbinstance_id + + return logfile + def create_dbsnapshot(self, snapshot_id, dbinstance_id): """ Create a new DB snapshot. @@ -1084,21 +1178,21 @@ params = {'DBSnapshotIdentifier': snapshot_id, 'DBInstanceIdentifier': dbinstance_id} return self.get_object('CreateDBSnapshot', params, DBSnapshot) - + def copy_dbsnapshot(self, source_snapshot_id, target_snapshot_id): """ Copies the specified DBSnapshot. - + :type source_snapshot_id: string + :param source_snapshot_id: The identifier for the source DB snapshot. - + :type target_snapshot_id: string + :param target_snapshot_id: The identifier for the copied snapshot. - + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` - :return: The newly created DBSnapshot. + :return: The newly created DBSnapshot. 
""" - params = {'SourceDBSnapshotIdentifier': source_snapshot_id, + params = {'SourceDBSnapshotIdentifier': source_snapshot_id, 'TargetDBSnapshotIdentifier': target_snapshot_id} return self.get_object('CopyDBSnapshot', params, DBSnapshot) @@ -1364,11 +1458,11 @@ :return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup` """ params = dict() - if name != None: + if name is not None: params['DBSubnetGroupName'] = name - if max_records != None: + if max_records is not None: params['MaxRecords'] = max_records - if marker != None: + if marker is not None: params['Marker'] = marker return self.get_list('DescribeDBSubnetGroups', params, [('DBSubnetGroup',DBSubnetGroup)]) @@ -1387,9 +1481,9 @@ :return: The newly created ParameterGroup """ params = {'DBSubnetGroupName': name} - if description != None: + if description is not None: params['DBSubnetGroupDescription'] = description - if subnet_ids != None: + if subnet_ids is not None: self.build_list_params(params, subnet_ids, 'SubnetIds.member') return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup) diff -Nru python-boto-2.20.1/boto/rds/logfile.py python-boto-2.29.1/boto/rds/logfile.py --- python-boto-2.20.1/boto/rds/logfile.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds/logfile.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,68 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Jumping Qu http://newrice.blogspot.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included 
+# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LogFile(object): + + def __init__(self, connection=None): + self.connection = connection + self.size = None + self.log_filename = None + self.last_written = None + + def __repr__(self): + #return '(%s, %s, %s)' % (self.logfilename, self.size, self.lastwritten) + return '%s' % (self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LastWritten': + self.last_written = value + elif name == 'LogFileName': + self.log_filename = value + elif name == 'Size': + self.size = value + else: + setattr(self, name, value) + + +class LogFileObject(object): + def __init__(self, connection=None): + self.connection = connection + self.log_filename = None + + def __repr__(self): + return "LogFileObject: %s/%s" % (self.dbinstance_id, self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LogFileData': + self.data = value + elif name == 'AdditionalDataPending': + self.additional_data_pending = value + elif name == 'Marker': + self.marker = value + else: + setattr(self, name, value) diff -Nru python-boto-2.20.1/boto/rds/parametergroup.py python-boto-2.29.1/boto/rds/parametergroup.py --- python-boto-2.20.1/boto/rds/parametergroup.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds/parametergroup.py 2014-05-30 20:49:34.000000000 +0000 @@ -133,7 +133,7 @@ 
d[prefix+'ApplyMethod'] = self.apply_method def _set_string_value(self, value): - if not isinstance(value, str) or isinstance(value, unicode): + if not isinstance(value, basestring): raise ValueError('value must be of type str') if self.allowed_values: choices = self.allowed_values.split(',') @@ -142,7 +142,7 @@ self._value = value def _set_integer_value(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): value = int(value) if isinstance(value, int) or isinstance(value, long): if self.allowed_values: @@ -156,7 +156,7 @@ def _set_boolean_value(self, value): if isinstance(value, bool): self._value = value - elif isinstance(value, str) or isinstance(value, unicode): + elif isinstance(value, basestring): if value.lower() == 'true': self._value = True else: @@ -175,7 +175,7 @@ raise TypeError('unknown type (%s)' % self.type) def get_value(self): - if self._value == None: + if self._value is None: return self._value if self.type == 'string': return self._value diff -Nru python-boto-2.20.1/boto/rds/regioninfo.py python-boto-2.29.1/boto/rds/regioninfo.py --- python-boto-2.20.1/boto/rds/regioninfo.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds/regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -16,7 +16,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -26,7 +26,8 @@ class RDSRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.rds import RDSConnection - RegionInfo.__init__(self, connection, name, endpoint, + super(RDSRegionInfo, self).__init__(connection, name, endpoint, RDSConnection) diff -Nru python-boto-2.20.1/boto/rds2/exceptions.py python-boto-2.29.1/boto/rds2/exceptions.py --- python-boto-2.20.1/boto/rds2/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds2/exceptions.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,234 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.exception import JSONResponseError + + +class InvalidSubnet(JSONResponseError): + pass + + +class DBParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class DBSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class DBSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class InstanceQuotaExceeded(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class InvalidDBParameterGroupState(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class InsufficientDBInstanceCapacity(JSONResponseError): + pass + + +class ReservedDBInstanceQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotFound(JSONResponseError): + pass + + +class DBInstanceAlreadyExists(JSONResponseError): + pass + + +class ReservedDBInstanceNotFound(JSONResponseError): + pass + + +class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError): + pass + + +class InvalidDBSecurityGroupState(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ReservedDBInstancesOfferingNotFound(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class SnapshotQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupQuotaExceeded(JSONResponseError): + pass + + +class DBParameterGroupNotFound(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class InvalidDBSubnetGroupState(JSONResponseError): + pass + + +class DBSubnetGroupNotFound(JSONResponseError): + pass + + +class InvalidOptionGroupState(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class 
DBSecurityGroupNotSupported(JSONResponseError): + pass + + +class InvalidEventSubscriptionState(JSONResponseError): + pass + + +class InvalidDBSubnetState(JSONResponseError): + pass + + +class InvalidDBSnapshotState(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class DBSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class ProvisionedIopsNotAvailableInAZ(JSONResponseError): + pass + + +class AuthorizationNotFound(JSONResponseError): + pass + + +class OptionGroupAlreadyExists(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class DBUpgradeDependencyFailure(JSONResponseError): + pass + + +class PointInTimeRestoreNotEnabled(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class DBSubnetQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupNotFound(JSONResponseError): + pass + + +class DBParameterGroupAlreadyExists(JSONResponseError): + pass + + +class DBInstanceNotFound(JSONResponseError): + pass + + +class ReservedDBInstanceAlreadyExists(JSONResponseError): + pass + + +class InvalidDBInstanceState(JSONResponseError): + pass + + +class DBSnapshotNotFound(JSONResponseError): + pass + + +class DBSnapshotAlreadyExists(JSONResponseError): + pass + + +class StorageQuotaExceeded(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass diff -Nru python-boto-2.20.1/boto/rds2/__init__.py python-boto-2.29.1/boto/rds2/__init__.py --- python-boto-2.20.1/boto/rds2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the RDS service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.rds2.layer1 import RDSConnection + return get_regions('rds', connection_cls=RDSConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.rds2.layer1.RDSConnection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. 
+ + :rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff -Nru python-boto-2.20.1/boto/rds2/layer1.py python-boto-2.29.1/boto/rds2/layer1.py --- python-boto-2.20.1/boto/rds2/layer1.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/rds2/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,3774 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.rds2 import exceptions + + +class RDSConnection(AWSQueryConnection): + """ + Amazon Relational Database Service + Amazon Relational Database Service (Amazon RDS) is a web service + that makes it easier to set up, operate, and scale a relational + database in the cloud. It provides cost-efficient, resizable + capacity for an industry-standard relational database and manages + common database administration tasks, freeing up developers to + focus on what makes their applications and businesses unique. + + Amazon RDS gives you access to the capabilities of a familiar + MySQL or Oracle database server. This means the code, + applications, and tools you already use today with your existing + MySQL or Oracle databases work with Amazon RDS without + modification. Amazon RDS automatically backs up your database and + maintains the database software that powers your DB instance. + Amazon RDS is flexible: you can scale your database instance's + compute resources and storage capacity to meet your application's + demand. As with all Amazon Web Services, there are no up-front + investments, and you pay only for the resources you use. + + This is the Amazon RDS API Reference . It contains a comprehensive + description of all Amazon RDS Query APIs and data types. Note that + this API is asynchronous and some actions may require polling to + determine when an action has been applied. See the parameter + description to determine if a change is applied immediately or on + the next instance reboot or during the maintenance window. For + more information on Amazon RDS concepts and usage scenarios, go to + the `Amazon RDS User Guide`_. 
+ """ + APIVersion = "2013-09-09" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidSubnet": exceptions.InvalidSubnet, + "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded, + "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists, + "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded, + "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded, + "InvalidRestore": exceptions.InvalidRestore, + "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, + "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists, + "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity, + "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded, + "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound, + "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists, + "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound, + "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs, + "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, + "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound, + "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, + "SNSNoAuthorization": exceptions.SNSNoAuthorization, + "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded, + "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded, + "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound, + "SNSInvalidTopic": exceptions.SNSInvalidTopic, + "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState, + "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound, + "InvalidOptionGroupState": exceptions.InvalidOptionGroupState, + 
"SourceNotFound": exceptions.SourceNotFound, + "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported, + "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState, + "InvalidDBSubnetState": exceptions.InvalidDBSubnetState, + "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded, + "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure, + "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded, + "OptionGroupNotFound": exceptions.OptionGroupNotFound, + "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists, + "DBInstanceNotFound": exceptions.DBInstanceNotFound, + "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists, + "InvalidDBInstanceState": exceptions.InvalidDBInstanceState, + "DBSnapshotNotFound": exceptions.DBSnapshotNotFound, + "DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists, + "StorageQuotaExceeded": exceptions.StorageQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(RDSConnection, 
self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_source_identifier_to_subscription(self, subscription_name, + source_identifier): + """ + Adds a source identifier to an existing RDS event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to add a source identifier to. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source to be added. An identifier must + begin with a letter and must contain only ASCII letters, digits, + and hyphens; it cannot end with a hyphen or contain two consecutive + hyphens. + + Constraints: + + + + If the source type is a DB instance, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is a DB security group, a `DBSecurityGroupName` + must be supplied. + + If the source type is a DB parameter group, a `DBParameterGroupName` + must be supplied. + + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be + supplied. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='AddSourceIdentifierToSubscription', + verb='POST', + path='/', params=params) + + def add_tags_to_resource(self, resource_name, tags): + """ + Adds metadata tags to an Amazon RDS resource. These tags can + also be used with cost allocation reporting to track cost + associated with Amazon RDS resources, or used in Condition + statement in IAM policy for Amazon RDS. + + For an overview on tagging Amazon RDS resources, see `Tagging + Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be added + to. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. 
+ + :type tags: list + :param tags: The tags to be assigned to the Amazon RDS resource. + + """ + params = {'ResourceName': resource_name, } + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='AddTagsToResource', + verb='POST', + path='/', params=params) + + def authorize_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Enables ingress to a DBSecurityGroup using one of two forms of + authorization. First, EC2 or VPC security groups can be added + to the DBSecurityGroup if the application using the database + is running on EC2 or VPC instances. Second, IP ranges are + available if the application accessing your database is + running on the Internet. Required parameters for this API are + one of CIDR range, EC2SecurityGroupId for VPC, or + (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or + EC2SecurityGroupId for non-VPC). + You cannot authorize ingress from an EC2 security group in one + Region to an Amazon RDS DB instance in another. You cannot + authorize ingress from a VPC security group in one VPC to an + Amazon RDS DB instance in another. + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to add + authorization to. + + :type cidrip: string + :param cidrip: The IP range to authorize. + + :type ec2_security_group_name: string + :param ec2_security_group_name: Name of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: Id of the EC2 security group to + authorize. 
For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: AWS Account Number of the owner of + the EC2 security group specified in the EC2SecurityGroupName + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='AuthorizeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def copy_db_snapshot(self, source_db_snapshot_identifier, + target_db_snapshot_identifier, tags=None): + """ + Copies the specified DBSnapshot. The source DBSnapshot must be + in the "available" state. + + :type source_db_snapshot_identifier: string + :param source_db_snapshot_identifier: The identifier for the source DB + snapshot. + Constraints: + + + + Must be the identifier for a valid system snapshot in the "available" + state. + + + Example: `rds:mydb-2012-04-02-00-01` + + :type target_db_snapshot_identifier: string + :param target_db_snapshot_identifier: The identifier for the copied + snapshot. 
+ Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-db-snapshot` + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier, + 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CopyDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_instance(self, db_instance_identifier, allocated_storage, + db_instance_class, engine, master_username, + master_user_password, db_name=None, + db_security_groups=None, + vpc_security_group_ids=None, + availability_zone=None, db_subnet_group_name=None, + preferred_maintenance_window=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, port=None, + multi_az=None, engine_version=None, + auto_minor_version_upgrade=None, + license_model=None, iops=None, + option_group_name=None, character_set_name=None, + publicly_accessible=None, tags=None): + """ + Creates a new DB instance. + + :type db_name: string + :param db_name: The meaning of this parameter differs according to the + database engine you use. + **MySQL** + + The name of the database to create when the DB instance is created. If + this parameter is not specified, no database is created in the DB + instance. + + Constraints: + + + + Must contain 1 to 64 alphanumeric characters + + Cannot be a word reserved by the specified database engine + + + Type: String + + **Oracle** + + The Oracle System ID (SID) of the created DB instance. + + Default: `ORCL` + + Constraints: + + + + Cannot be longer than 8 characters + + + **SQL Server** + + Not applicable. Must be null. 
+ + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This + parameter is stored as a lowercase string. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + for SQL Server). + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + + Example: `mydbinstance` + + :type allocated_storage: integer + :param allocated_storage: The amount of storage (in gigabytes) to be + initially allocated for the database instance. + **MySQL** + + Constraints: Must be an integer from 5 to 1024. + + Type: Integer + + **Oracle** + + Constraints: Must be an integer from 10 to 1024. + + **SQL Server** + + Constraints: Must be an integer from 200 to 1024 (Standard Edition and + Enterprise Edition) or from 30 to 1024 (Express Edition and Web + Edition) + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the DB + instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + :type engine: string + :param engine: The name of the database engine to be used for this + instance. + Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` | + `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web` + + :type master_username: string + :param master_username: + The name of master user for the client DB instance. + + **MySQL** + + Constraints: + + + + Must be 1 to 16 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + Type: String + + **Oracle** + + Constraints: + + + + Must be 1 to 30 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + **SQL Server** + + Constraints: + + + + Must be 1 to 128 alphanumeric characters. + + First character must be a letter. 
+ + Cannot be a reserved word for the chosen database engine. + + :type master_user_password: string + :param master_user_password: The password for the master database user. + Can be any printable ASCII character except "/", '"', or "@". + Type: String + + **MySQL** + + Constraints: Must contain from 8 to 41 characters. + + **Oracle** + + Constraints: Must contain from 8 to 30 characters. + + **SQL Server** + + Constraints: Must contain from 8 to 128 characters. + + :type db_security_groups: list + :param db_security_groups: A list of DB security groups to associate + with this DB instance. + Default: The default DB security group for the database engine. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of EC2 VPC security groups to + associate with this DB instance. + Default: The default EC2 VPC security group for the DB subnet group's + VPC. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + Constraint: The AvailabilityZone parameter cannot be specified if the + MultiAZ parameter is set to `True`. The specified Availability Zone + must be in the same region as the current endpoint. + + :type db_subnet_group_name: string + :param db_subnet_group_name: A DB subnet group to associate with this + DB instance. + If there is no DB subnet group, then it is a non-VPC DB instance. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: A 30-minute window selected at random from an 8-hour block of + time per region, occurring on a random day of the week. To see the + time blocks available, see ` Adjusting the Preferred Maintenance + Window`_ in the Amazon RDS User Guide. 
+ + Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + + Constraints: Minimum 30-minute window. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group to associate with this DB instance. + If this argument is omitted, the default DBParameterGroup for the + specified engine will be used. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days for which automated backups are retained. Setting + this parameter to a positive number enables backups. Setting this + parameter to 0 disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. + + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + :type port: integer + :param port: The port number on which the database accepts connections. + **MySQL** + + Default: `3306` + + Valid Values: `1150-65535` + + Type: Integer + + **Oracle** + + Default: `1521` + + Valid Values: `1150-65535` + + **SQL Server** + + Default: `1433` + + Valid Values: `1150-65535` except for `1434` and `3389`. 
+ + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + You cannot set the AvailabilityZone parameter if the MultiAZ + parameter is set to true. + + :type engine_version: string + :param engine_version: The version number of the database engine to + use. + **MySQL** + + Example: `5.1.42` + + Type: String + + **Oracle** + + Example: `11.2.0.2.v2` + + Type: String + + **SQL Server** + + Example: `10.50.2789.0.v1` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the DB instance during the + maintenance window. + Default: `True` + + :type license_model: string + :param license_model: License model information for this DB instance. + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type character_set_name: string + :param character_set_name: For supported engines, indicates that the DB + instance should be associated with the specified CharacterSet. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. 
A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'AllocatedStorage': allocated_storage, + 'DBInstanceClass': db_instance_class, + 'Engine': engine, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if port is not None: + params['Port'] = port + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version 
is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if character_set_name is not None: + params['CharacterSetName'] = character_set_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstance', + verb='POST', + path='/', params=params) + + def create_db_instance_read_replica(self, db_instance_identifier, + source_db_instance_identifier, + db_instance_class=None, + availability_zone=None, port=None, + auto_minor_version_upgrade=None, + iops=None, option_group_name=None, + publicly_accessible=None, tags=None): + """ + Creates a DB instance that acts as a read replica of a source + DB instance. + + All read replica DB instances are created as Single-AZ + deployments with backups disabled. All other DB instance + attributes (including DB security groups and DB parameter + groups) are inherited from the source DB instance, except as + specified below. + + The source DB instance must have backup retention enabled. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier of the read + replica. This is the unique key that identifies a DB instance. This + parameter is stored as a lowercase string. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: The identifier of the DB instance + that will act as the source for the read replica. Each DB instance + can have up to five read replicas. 
+ Constraints: Must be the identifier of an existing DB instance that is + not already a read replica DB instance. + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the read + replica. + Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + Default: Inherits from the source DB instance. + + :type availability_zone: string + :param availability_zone: The Amazon EC2 Availability Zone that the + read replica will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + :type port: integer + :param port: The port number that the DB instance uses for connections. + Default: Inherits from the source DB instance + + Valid Values: `1150-65535` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the read replica during the + maintenance window. + Default: Inherits from the source DB instance + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + + :type option_group_name: string + :param option_group_name: The option group the DB instance will be + associated with. If omitted, the default option group for the + engine specified will be used. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. 
+ + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if port is not None: + params['Port'] = port + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstanceReadReplica', + verb='POST', + path='/', params=params) + + def create_db_parameter_group(self, db_parameter_group_name, + db_parameter_group_family, description, + tags=None): + """ + Creates a new DB parameter group. + + A DB parameter group is initially created with the default + parameters for the database engine used by the DB instance. To + provide custom values for any of the parameters, you must + modify the group after creating it using + ModifyDBParameterGroup . Once you've created a DB parameter + group, you need to associate it with your DB instance using + ModifyDBInstance . 
When you associate a new DB parameter group + with a running DB instance, you need to reboot the DB Instance + for the new DB parameter group and associated settings to take + effect. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + This value is stored as a lower-case string. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The DB parameter group family name. A + DB parameter group can be associated with one and only one DB + parameter group family, and can be applied only to a DB instance + running a database engine and engine version compatible with that + DB parameter group family. + + :type description: string + :param description: The description for the DB parameter group. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBParameterGroupName': db_parameter_group_name, + 'DBParameterGroupFamily': db_parameter_group_family, + 'Description': description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBParameterGroup', + verb='POST', + path='/', params=params) + + def create_db_security_group(self, db_security_group_name, + db_security_group_description, tags=None): + """ + Creates a new DB security group. DB security groups control + access to a DB instance. + + :type db_security_group_name: string + :param db_security_group_name: The name for the DB security group. This + value is stored as a lowercase string. 
+ Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + Must not be "Default" + + May not contain spaces + + + Example: `mysecuritygroup` + + :type db_security_group_description: string + :param db_security_group_description: The description for the DB + security group. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBSecurityGroupName': db_security_group_name, + 'DBSecurityGroupDescription': db_security_group_description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSecurityGroup', + verb='POST', + path='/', params=params) + + def create_db_snapshot(self, db_snapshot_identifier, + db_instance_identifier, tags=None): + """ + Creates a DBSnapshot. The source DBInstance must be in + "available" state. + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The identifier for the DB snapshot. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This is the unique key that identifies a DB + instance. This parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'DBSnapshotIdentifier': db_snapshot_identifier, + 'DBInstanceIdentifier': db_instance_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_subnet_group(self, db_subnet_group_name, + db_subnet_group_description, subnet_ids, + tags=None): + """ + Creates a new DB subnet group. DB subnet groups must contain + at least one subnet in at least two AZs in the region. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name for the DB subnet group. This + value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. Must not be "Default". + + Example: `mySubnetgroup` + + :type db_subnet_group_description: string + :param db_subnet_group_description: The description for the DB subnet + group. + + :type subnet_ids: list + :param subnet_ids: The EC2 Subnet IDs for the DB subnet group. + + :type tags: list + :param tags: A list of tags into tuples. + + """ + params = { + 'DBSubnetGroupName': db_subnet_group_name, + 'DBSubnetGroupDescription': db_subnet_group_description, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSubnetGroup', + verb='POST', + path='/', params=params) + + def create_event_subscription(self, subscription_name, sns_topic_arn, + source_type=None, event_categories=None, + source_ids=None, enabled=None, tags=None): + """ + Creates an RDS event notification subscription. This action + requires a topic ARN (Amazon Resource Name) created by either + the RDS console, the SNS console, or the SNS API. To obtain an + ARN with SNS, you must create a topic in Amazon SNS and + subscribe to the topic. 
The ARN is displayed in the SNS + console. + + You can specify the type of source (SourceType) you want to be + notified of, provide a list of RDS sources (SourceIds) that + triggers the events, and provide a list of event categories + (EventCategories) for events you want to be notified of. For + example, you can specify SourceType = db-instance, SourceIds = + mydbinstance1, mydbinstance2 and EventCategories = + Availability, Backup. + + If you specify both the SourceType and SourceIds, such as + SourceType = db-instance and SourceIdentifier = myDBInstance1, + you will be notified of all the db-instance events for the + specified source. If you specify a SourceType but do not + specify a SourceIdentifier, you will receive notice of the + events for that source type for all your RDS sources. If you + do not specify either the SourceType nor the SourceIdentifier, + you will be notified of events generated from all RDS sources + belonging to your customer account. + + :type subscription_name: string + :param subscription_name: The name of the subscription. + Constraints: The name must be less than 255 characters. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + created for event notification. The ARN is created by Amazon SNS + when you create a topic and subscribe to it. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a DB instance, you would set this parameter to db-instance. if + this value is not specified, all events are returned. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + :type event_categories: list + :param event_categories: A list of event categories for a SourceType + that you want to subscribe to. 
You can see a list of the categories + for a given SourceType in the `Events`_ topic in the Amazon RDS + User Guide or by using the **DescribeEventCategories** action. + + :type source_ids: list + :param source_ids: + The list of identifiers of the event sources for which events will be + returned. If not specified, then all sources are included in the + response. An identifier must begin with a letter and must contain + only ASCII letters, digits, and hyphens; it cannot end with a + hyphen or contain two consecutive hyphens. + + Constraints: + + + + If SourceIds are supplied, SourceType must also be provided. + + If the source type is a DB instance, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is a DB security group, a `DBSecurityGroupName` + must be supplied. + + If the source type is a DB parameter group, a `DBParameterGroupName` + must be supplied. + + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be + supplied. + + :type enabled: boolean + :param enabled: A Boolean value; set to **true** to activate the + subscription, set to **false** to create the subscription but not + active it. + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'SubscriptionName': subscription_name, + 'SnsTopicArn': sns_topic_arn, + } + if source_type is not None: + params['SourceType'] = source_type + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if source_ids is not None: + self.build_list_params(params, + source_ids, + 'SourceIds.member') + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateEventSubscription', + verb='POST', + path='/', params=params) + + def create_option_group(self, option_group_name, engine_name, + major_engine_version, option_group_description, + tags=None): + """ + Creates a new option group. You can create up to 20 option + groups. + + :type option_group_name: string + :param option_group_name: Specifies the name of the option group to be + created. + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `myoptiongroup` + + :type engine_name: string + :param engine_name: Specifies the name of the engine that this option + group should be associated with. + + :type major_engine_version: string + :param major_engine_version: Specifies the major version of the engine + that this option group should be associated with. + + :type option_group_description: string + :param option_group_description: The description of the option group. + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'OptionGroupName': option_group_name, + 'EngineName': engine_name, + 'MajorEngineVersion': major_engine_version, + 'OptionGroupDescription': option_group_description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateOptionGroup', + verb='POST', + path='/', params=params) + + def delete_db_instance(self, db_instance_identifier, + skip_final_snapshot=None, + final_db_snapshot_identifier=None): + """ + The DeleteDBInstance action deletes a previously provisioned + DB instance. A successful response from the web service + indicates the request was received correctly. When you delete + a DB instance, all automated backups for that instance are + deleted and cannot be recovered. Manual DB snapshots of the DB + instance to be deleted are not deleted. + + If a final DB snapshot is requested the status of the RDS + instance will be "deleting" until the DB snapshot is created. + The API action `DescribeDBInstance` is used to monitor the + status of this operation. The action cannot be canceled or + reverted once submitted. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier for the DB instance to be deleted. This + parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type skip_final_snapshot: boolean + :param skip_final_snapshot: Determines whether a final DB snapshot is + created before the DB instance is deleted. If `True` is specified, + no DBSnapshot is created. If false is specified, a DB snapshot is + created before the DB instance is deleted. + The FinalDBSnapshotIdentifier parameter must be specified if + SkipFinalSnapshot is `False`. 
+ + Default: `False` + + :type final_db_snapshot_identifier: string + :param final_db_snapshot_identifier: + The DBSnapshotIdentifier of the new DBSnapshot created when + SkipFinalSnapshot is set to `False`. + + Specifying this parameter and also setting the SkipFinalShapshot + parameter to true results in an error. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if skip_final_snapshot is not None: + params['SkipFinalSnapshot'] = str( + skip_final_snapshot).lower() + if final_db_snapshot_identifier is not None: + params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier + return self._make_request( + action='DeleteDBInstance', + verb='POST', + path='/', params=params) + + def delete_db_parameter_group(self, db_parameter_group_name): + """ + Deletes a specified DBParameterGroup. The DBParameterGroup + cannot be associated with any RDS instances to be deleted. + The specified DB parameter group cannot be associated with any + DB instances. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be the name of an existing DB parameter group + + You cannot delete a default DB parameter group + + Cannot be associated with any DB instances + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + return self._make_request( + action='DeleteDBParameterGroup', + verb='POST', + path='/', params=params) + + def delete_db_security_group(self, db_security_group_name): + """ + Deletes a DB security group. + The specified DB security group must not be associated with + any DB instances. + + :type db_security_group_name: string + :param db_security_group_name: + The name of the DB security group to delete. + + You cannot delete the default DB security group. 
+ + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + Must not be "Default" + + May not contain spaces + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + return self._make_request( + action='DeleteDBSecurityGroup', + verb='POST', + path='/', params=params) + + def delete_db_snapshot(self, db_snapshot_identifier): + """ + Deletes a DBSnapshot. + The DBSnapshot must be in the `available` state to be deleted. + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The DBSnapshot identifier. + Constraints: Must be the name of an existing DB snapshot in the + `available` state. + + """ + params = {'DBSnapshotIdentifier': db_snapshot_identifier, } + return self._make_request( + action='DeleteDBSnapshot', + verb='POST', + path='/', params=params) + + def delete_db_subnet_group(self, db_subnet_group_name): + """ + Deletes a DB subnet group. + The specified database subnet group must not be associated + with any DB instances. + + :type db_subnet_group_name: string + :param db_subnet_group_name: + The name of the database subnet group to delete. + + You cannot delete the default subnet group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBSubnetGroupName': db_subnet_group_name, } + return self._make_request( + action='DeleteDBSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_event_subscription(self, subscription_name): + """ + Deletes an RDS event notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to delete. 
+ + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_option_group(self, option_group_name): + """ + Deletes an existing option group. + + :type option_group_name: string + :param option_group_name: + The name of the option group to be deleted. + + You cannot delete default option groups. + + """ + params = {'OptionGroupName': option_group_name, } + return self._make_request( + action='DeleteOptionGroup', + verb='POST', + path='/', params=params) + + def describe_db_engine_versions(self, engine=None, engine_version=None, + db_parameter_group_family=None, + max_records=None, marker=None, + default_only=None, + list_supported_character_sets=None): + """ + Returns a list of the available DB engines. + + :type engine: string + :param engine: The database engine to return. + + :type engine_version: string + :param engine_version: The database engine version to return. + Example: `5.1.49` + + :type db_parameter_group_family: string + :param db_parameter_group_family: + The name of a specific DB parameter group family to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + :type default_only: boolean + :param default_only: Indicates that only the default version of the + specified engine or engine and major version combination is + returned. + + :type list_supported_character_sets: boolean + :param list_supported_character_sets: If this parameter is specified, + and if the requested engine supports the CharacterSetName parameter + for CreateDBInstance, the response includes a list of supported + character sets for each engine version. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_parameter_group_family is not None: + params['DBParameterGroupFamily'] = db_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + if list_supported_character_sets is not None: + params['ListSupportedCharacterSets'] = str( + list_supported_character_sets).lower() + return self._make_request( + action='DescribeDBEngineVersions', + verb='POST', + path='/', params=params) + + def describe_db_instances(self, db_instance_identifier=None, + filters=None, max_records=None, marker=None): + """ + Returns information about provisioned RDS instances. This API + supports pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + The user-supplied instance identifier. If this parameter is specified, + information from only the specific DB instance is returned. This + parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. 
If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBInstances request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords` . + + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBInstances', + verb='POST', + path='/', params=params) + + def describe_db_log_files(self, db_instance_identifier, + filename_contains=None, file_last_written=None, + file_size=None, max_records=None, marker=None): + """ + Returns a list of DB log files for the DB instance. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filename_contains: string + :param filename_contains: Filters the available log files for log file + names that contain the specified string. + + :type file_last_written: long + :param file_last_written: Filters the available log files for files + written since the specified date, in POSIX timestamp format. + + :type file_size: long + :param file_size: Filters the available log files for files larger than + the specified size. 
+ + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified MaxRecords + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if filename_contains is not None: + params['FilenameContains'] = filename_contains + if file_last_written is not None: + params['FileLastWritten'] = file_last_written + if file_size is not None: + params['FileSize'] = file_size + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBLogFiles', + verb='POST', + path='/', params=params) + + def describe_db_parameter_groups(self, db_parameter_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBParameterGroup` descriptions. If a + `DBParameterGroupName` is specified, the list will contain + only the description of the specified DB parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. 
+ Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameterGroups` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {} + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameterGroups', + verb='POST', + path='/', params=params) + + def describe_db_parameters(self, db_parameter_group_name, source=None, + max_records=None, marker=None): + """ + Returns the detailed parameter list for a particular DB + parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type source: string + :param source: The parameter types to return. + Default: All parameter types returned + + Valid Values: `user | system | engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameters` request. 
If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameters', + verb='POST', + path='/', params=params) + + def describe_db_security_groups(self, db_security_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBSecurityGroup` descriptions. If a + `DBSecurityGroupName` is specified, the list will contain only + the descriptions of the specified DB security group. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + return details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSecurityGroups request. If this parameter is specified, + the response includes only records beyond the marker, up to the + value specified by `MaxRecords`. 
+ + """ + params = {} + if db_security_group_name is not None: + params['DBSecurityGroupName'] = db_security_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSecurityGroups', + verb='POST', + path='/', params=params) + + def describe_db_snapshots(self, db_instance_identifier=None, + db_snapshot_identifier=None, + snapshot_type=None, filters=None, + max_records=None, marker=None): + """ + Returns information about DB snapshots. This API supports + pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + A DB instance identifier to retrieve the list of DB snapshots for. + Cannot be used in conjunction with `DBSnapshotIdentifier`. This + parameter is not case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: + A specific DB snapshot identifier to describe. Cannot be used in + conjunction with `DBInstanceIdentifier`. This value is stored as a + lowercase string. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + If this is the identifier of an automated snapshot, the + `SnapshotType` parameter must also be specified. + + :type snapshot_type: string + :param snapshot_type: The type of snapshots that will be returned. + Values can be "automated" or "manual." If not specified, the + returned results will include all snapshots types. 
+ + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBSnapshots` request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if db_snapshot_identifier is not None: + params['DBSnapshotIdentifier'] = db_snapshot_identifier + if snapshot_type is not None: + params['SnapshotType'] = snapshot_type + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSnapshots', + verb='POST', + path='/', params=params) + + def describe_db_subnet_groups(self, db_subnet_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of DBSubnetGroup descriptions. If a + DBSubnetGroupName is specified, the list will contain only the + descriptions of the specified DBSubnetGroup. + + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name of the DB subnet group to return + details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. 
If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSubnetGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, db_parameter_group_family, + max_records=None, marker=None): + """ + Returns the default engine and system parameter information + for the specified database engine. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The name of the DB parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeEngineDefaultParameters` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. 
+ + """ + params = { + 'DBParameterGroupFamily': db_parameter_group_family, + } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEngineDefaultParameters', + verb='POST', + path='/', params=params) + + def describe_event_categories(self, source_type=None): + """ + Displays a list of categories for all event source types, or, + if specified, for a specified source type. You can see a list + of the event categories and source types in the ` Events`_ + topic in the Amazon RDS User Guide. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + """ + params = {} + if source_type is not None: + params['SourceType'] = source_type + return self._make_request( + action='DescribeEventCategories', + verb='POST', + path='/', params=params) + + def describe_event_subscriptions(self, subscription_name=None, + filters=None, max_records=None, + marker=None): + """ + Lists all the subscription descriptions for a customer + account. The description for a subscription includes + SubscriptionName, SNSTopicARN, CustomerID, SourceType, + SourceID, CreationTime, and Status. + + If you specify a SubscriptionName, lists the description for + that subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to describe. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. 
+ Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableDBInstanceOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords` . + + """ + params = {} + if subscription_name is not None: + params['SubscriptionName'] = subscription_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEventSubscriptions', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + event_categories=None, max_records=None, marker=None): + """ + Returns events related to DB instances, DB security groups, DB + snapshots, and DB parameter groups for the past 14 days. + Events specific to a particular DB instance, DB security + group, database snapshot, or DB parameter group can be + obtained by providing the name as a parameter. By default, the + past hour of events are returned. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source for which events will be returned. + If not specified, then all sources are included in the response. + + Constraints: + + + + If SourceIdentifier is supplied, SourceType must also be provided. + + If the source type is `DBInstance`, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must + be supplied. + + If the source type is `DBParameterGroup`, a `DBParameterGroupName` + must be supplied. + + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be + supplied. 
+ + Cannot end with a hyphen or contain two consecutive hyphens. + + :type source_type: string + :param source_type: The event source to retrieve events for. If no + value is specified, all events are returned. + + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. For more information + about ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: 2009-07-08T18:00Z + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. For more information about + ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: 2009-07-08T18:00Z + + :type duration: integer + :param duration: The number of minutes to retrieve events for. + Default: 60 + + :type event_categories: list + :param event_categories: A list of event categories that trigger + notifications for a event notification subscription. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeEvents request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. 
+ + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_option_group_options(self, engine_name, + major_engine_version=None, + max_records=None, marker=None): + """ + Describes all available options. + + :type engine_name: string + :param engine_name: A required parameter. Options available for the + given Engine name will be described. + + :type major_engine_version: string + :param major_engine_version: If specified, filters the results to + include only options for the specified major engine version. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + """ + params = {'EngineName': engine_name, } + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOptionGroupOptions', + verb='POST', + path='/', params=params) + + def describe_option_groups(self, option_group_name=None, filters=None, + marker=None, max_records=None, + engine_name=None, major_engine_version=None): + """ + Describes the available option groups. + + :type option_group_name: string + :param option_group_name: The name of the option group to describe. + Cannot be supplied together with EngineName or MajorEngineVersion. + + :type filters: list + :param filters: + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOptionGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type engine_name: string + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: string + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific database engine version. + If specified, then EngineName must also be specified. 
+ + """ + params = {} + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if marker is not None: + params['Marker'] = marker + if max_records is not None: + params['MaxRecords'] = max_records + if engine_name is not None: + params['EngineName'] = engine_name + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + return self._make_request( + action='DescribeOptionGroups', + verb='POST', + path='/', params=params) + + def describe_orderable_db_instance_options(self, engine, + engine_version=None, + db_instance_class=None, + license_model=None, vpc=None, + max_records=None, marker=None): + """ + Returns a list of orderable DB instance options for the + specified engine. + + :type engine: string + :param engine: The name of the engine to retrieve DB instance options + for. + + :type engine_version: string + :param engine_version: The engine version filter value. Specify this + parameter to show only the available offerings matching the + specified engine version. + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type license_model: string + :param license_model: The license model filter value. Specify this + parameter to show only the available offerings matching the + specified license model. + + :type vpc: boolean + :param vpc: The VPC filter value. Specify this parameter to show only + the available VPC or non-VPC offerings. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. 
If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableDBInstanceOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords` . + + """ + params = {'Engine': engine, } + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if license_model is not None: + params['LicenseModel'] = license_model + if vpc is not None: + params['Vpc'] = str( + vpc).lower() + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOrderableDBInstanceOptions', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances(self, reserved_db_instance_id=None, + reserved_db_instances_offering_id=None, + db_instance_class=None, duration=None, + product_description=None, + offering_type=None, multi_az=None, + filters=None, max_records=None, + marker=None): + """ + Returns information about reserved DB instances for this + account, or about a specified reserved DB instance. + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: The reserved DB instance identifier + filter value. Specify this parameter to show only the reservation + that matches the specified reservation ID. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only purchased + reservations matching the specified offering identifier. 
+ + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only those reservations matching the + specified DB instances class. + + :type duration: string + :param duration: The duration filter value, specified in years or + seconds. Specify this parameter to show only reservations for this + duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. + Specify this parameter to show only those reservations matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only those reservations matching the specified Multi-AZ + parameter. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + """ + params = {} + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstances', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances_offerings(self, + reserved_db_instances_offering_id=None, + db_instance_class=None, + duration=None, + product_description=None, + offering_type=None, + multi_az=None, + max_records=None, + marker=None): + """ + Lists available reserved DB instance offerings. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only the available + offering that matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Specify this parameter to show only reservations for this duration. 
+ Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: Product description filter value. Specify + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only the available offerings matching the specified Multi-AZ + parameter. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + """ + params = {} + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstancesOfferings', + verb='POST', + path='/', params=params) + + def download_db_log_file_portion(self, db_instance_identifier, + log_file_name, marker=None, + number_of_lines=None): + """ + Downloads the last line of the specified log file. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type log_file_name: string + :param log_file_name: The name of the log file to be downloaded. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + :type number_of_lines: integer + :param number_of_lines: The number of lines remaining to be downloaded. 
+ + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'LogFileName': log_file_name, + } + if marker is not None: + params['Marker'] = marker + if number_of_lines is not None: + params['NumberOfLines'] = number_of_lines + return self._make_request( + action='DownloadDBLogFilePortion', + verb='POST', + path='/', params=params) + + def list_tags_for_resource(self, resource_name): + """ + Lists all tags on an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource with tags to be listed. + This value is an Amazon Resource Name (ARN). For information about + creating an ARN, see ` Constructing an RDS Amazon Resource Name + (ARN)`_. + + """ + params = {'ResourceName': resource_name, } + return self._make_request( + action='ListTagsForResource', + verb='POST', + path='/', params=params) + + def modify_db_instance(self, db_instance_identifier, + allocated_storage=None, db_instance_class=None, + db_security_groups=None, + vpc_security_group_ids=None, + apply_immediately=None, master_user_password=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, + preferred_maintenance_window=None, multi_az=None, + engine_version=None, + allow_major_version_upgrade=None, + auto_minor_version_upgrade=None, iops=None, + option_group_name=None, + new_db_instance_identifier=None): + """ + Modify settings for a DB instance. You can change one or more + database configuration parameters by specifying these + parameters and the new values in the request. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This value is stored as a lowercase string. 
+ + Constraints: + + + + Must be the identifier for an existing DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: integer + :param allocated_storage: The new storage capacity of the RDS instance. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + **MySQL** + + Default: Uses existing setting + + Valid Values: 5-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + **Oracle** + + Default: Uses existing setting + + Valid Values: 10-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + **SQL Server** + + Cannot be modified. + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. 
No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type db_instance_class: string + :param db_instance_class: The new compute and memory capacity of the DB + instance. To determine the instance classes that are available for + a particular DB engine, use the DescribeOrderableDBInstanceOptions + action. + Passing a value for this parameter causes an outage during the change + and is applied during the next maintenance window, unless the + `ApplyImmediately` parameter is specified as `True` for this + request. + + Default: Uses existing setting + + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type db_security_groups: list + :param db_security_groups: + A list of DB security groups to authorize on this DB instance. Changing + this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: + A list of EC2 VPC security groups to authorize on this DB instance. + This change is asynchronously applied as soon as possible. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type apply_immediately: boolean + :param apply_immediately: Specifies whether or not the modifications in + this request and any pending modifications are asynchronously + applied as soon as possible, regardless of the + `PreferredMaintenanceWindow` setting for the DB instance. 
+ If this parameter is passed as `False`, changes to the DB instance are + applied on the next call to RebootDBInstance, the next maintenance + reboot, or the next failure reboot, whichever occurs first. See + each parameter to determine when a change is applied. + + Default: `False` + + :type master_user_password: string + :param master_user_password: + The new password for the DB instance master user. Can be any printable + ASCII character except "/", '"', or "@". + + Changing this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting + + Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 + alphanumeric characters (Oracle), or 8 to 128 alphanumeric + characters (SQL Server). + + Amazon RDS API actions never return the password, so this action + provides a way to regain access to a master instance user if the + password is lost. + + :type db_parameter_group_name: string + :param db_parameter_group_name: The name of the DB parameter group to + apply to this DB instance. Changing this parameter does not result + in an outage and the change is applied during the next maintenance + window unless the `ApplyImmediately` parameter is set to `True` for + this request. + Default: Uses existing setting + + Constraints: The DB parameter group must be in the same DB parameter + group family as this DB instance. + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Changing this parameter can result in an outage if you change from 0 to + a non-zero value or from a non-zero value to 0. 
These changes are + applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + you change the parameter from one non-zero value to another non- + zero value, the change is asynchronously applied as soon as + possible. + + Default: Uses existing setting + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas or if the DB instance is a read replica + + :type preferred_backup_window: string + :param preferred_backup_window: + The daily time range during which automated backups are created if + automated backups are enabled, as determined by the + `BackupRetentionPeriod`. Changing this parameter does not result in + an outage and the change is asynchronously applied as soon as + possible. + + Constraints: + + + + Must be in the format hh24:mi-hh24:mi + + Times should be Universal Time Coordinated (UTC) + + Must not conflict with the preferred maintenance window + + Must be at least 30 minutes + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, which may result in an + outage. Changing this parameter does not result in an outage, + except in the following situation, and the change is asynchronously + applied as soon as possible. If there are pending actions that + cause a reboot, and the maintenance window is changed to include + the current time, then changing this parameter will cause a reboot + of the DB instance. If moving this window to the current time, + there must be at least 30 minutes between the current time and end + of the window to ensure pending changes are applied. 
+ Default: Uses existing setting + + Format: ddd:hh24:mi-ddd:hh24:mi + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + Constraints: Cannot be specified if the DB instance is a read replica. + + :type engine_version: string + :param engine_version: The version number of the database engine to + upgrade to. Changing this parameter results in an outage and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + For major version upgrades, if a non-default DB parameter group is + currently in use, a new DB parameter group in the DB parameter + group family for the new engine version must be specified. The new + DB parameter group can be the default for that DB parameter group + family. + + Example: `5.1.42` + + :type allow_major_version_upgrade: boolean + :param allow_major_version_upgrade: Indicates that major version + upgrades are allowed. Changing this parameter does not result in an + outage and the change is asynchronously applied as soon as + possible. + Constraints: This parameter must be set to true when specifying a value + for the EngineVersion parameter that is a different major version + than the DB instance's current version. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. Changing this parameter does not result in + an outage except in the following case and the change is + asynchronously applied as soon as possible. 
An outage will result + if this parameter is set to `True` during the maintenance window, + and a newer minor version is available, and RDS has enabled auto + patching for that engine version. + + :type iops: integer + :param iops: The new Provisioned IOPS (I/O operations per second) value + for the RDS instance. Changing this parameter does not result in an + outage and the change is applied during the next maintenance window + unless the `ApplyImmediately` parameter is set to `True` for this + request. + Default: Uses existing setting + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. 
Changing this parameter + does not result in an outage except in the following case and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + the parameter change results in an option group that enables OEM, + this change can cause a brief (sub-second) period during which new + connections are rejected but existing connections are not + interrupted. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type new_db_instance_identifier: string + :param new_db_instance_identifier: + The new DB instance identifier for the DB instance when renaming a DB + Instance. This value is stored as a lowercase string. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if allocated_storage is not None: + params['AllocatedStorage'] = allocated_storage + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + 
params['PreferredBackupWindow'] = preferred_backup_window + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if allow_major_version_upgrade is not None: + params['AllowMajorVersionUpgrade'] = str( + allow_major_version_upgrade).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if new_db_instance_identifier is not None: + params['NewDBInstanceIdentifier'] = new_db_instance_identifier + return self._make_request( + action='ModifyDBInstance', + verb='POST', + path='/', params=params) + + def modify_db_parameter_group(self, db_parameter_group_name, parameters): + """ + Modifies the parameters of a DB parameter group. To modify + more than one parameter, submit a list of the following: + `ParameterName`, `ParameterValue`, and `ApplyMethod`. A + maximum of 20 parameters can be modified in a single request. + + The `apply-immediate` method can be used only for dynamic + parameters; the `pending-reboot` method can be used with MySQL + and Oracle DB instances for either dynamic or static + parameters. For Microsoft SQL Server DB instances, the + `pending-reboot` method can be used only for static + parameters. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. 
+ + Constraints: + + + + Must be the name of an existing DB parameter group + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type parameters: list + :param parameters: + An array of parameter names, values, and the apply method for the + parameter update. At least one parameter name, value, and apply + method must be supplied; subsequent arguments are optional. A + maximum of 20 parameters may be modified in a single request. + + Valid Values (for the application method): `immediate | pending-reboot` + + You can use the immediate value with dynamic parameters only. You can + use the pending-reboot value for both dynamic and static + parameters, and changes are applied when DB instance reboots. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) + return self._make_request( + action='ModifyDBParameterGroup', + verb='POST', + path='/', params=params) + + def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids, + db_subnet_group_description=None): + """ + Modifies an existing DB subnet group. DB subnet groups must + contain at least one subnet in at least two AZs in the region. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name for the DB subnet group. This + value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. Must not be "Default". + + Example: `mySubnetgroup` + + :type db_subnet_group_description: string + :param db_subnet_group_description: The description for the DB subnet + group. + + :type subnet_ids: list + :param subnet_ids: The EC2 subnet IDs for the DB subnet group. 
+ + """ + params = {'DBSubnetGroupName': db_subnet_group_name, } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if db_subnet_group_description is not None: + params['DBSubnetGroupDescription'] = db_subnet_group_description + return self._make_request( + action='ModifyDBSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + event_categories=None, enabled=None): + """ + Modifies an existing RDS event notification subscription. Note + that you cannot modify the source identifiers using this call; + to change source identifiers for a subscription, use the + AddSourceIdentifierToSubscription and + RemoveSourceIdentifierFromSubscription calls. + + You can see a list of the event categories for a given + SourceType in the `Events`_ topic in the Amazon RDS User Guide + or by using the **DescribeEventCategories** action. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + created for event notification. The ARN is created by Amazon SNS + when you create a topic and subscribe to it. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a DB instance, you would set this parameter to db-instance. if + this value is not specified, all events are returned. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + :type event_categories: list + :param event_categories: A list of event categories for a SourceType + that you want to subscribe to. You can see a list of the categories + for a given SourceType in the `Events`_ topic in the Amazon RDS + User Guide or by using the **DescribeEventCategories** action. 
+ + :type enabled: boolean + :param enabled: A Boolean value; set to **true** to activate the + subscription. + + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_option_group(self, option_group_name, options_to_include=None, + options_to_remove=None, apply_immediately=None): + """ + Modifies an existing option group. + + :type option_group_name: string + :param option_group_name: The name of the option group to be modified. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type options_to_include: list + :param options_to_include: Options in this list are added to the option + group or, if already present, the specified configuration is used + to update the existing configuration. + + :type options_to_remove: list + :param options_to_remove: Options in this list are removed from the + option group. + + :type apply_immediately: boolean + :param apply_immediately: Indicates whether the changes should be + applied immediately, or during the next maintenance window for each + instance associated with the option group. 
+ + """ + params = {'OptionGroupName': option_group_name, } + if options_to_include is not None: + self.build_complex_list_params( + params, options_to_include, + 'OptionsToInclude.member', + ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')) + if options_to_remove is not None: + self.build_list_params(params, + options_to_remove, + 'OptionsToRemove.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + return self._make_request( + action='ModifyOptionGroup', + verb='POST', + path='/', params=params) + + def promote_read_replica(self, db_instance_identifier, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promotes a read replica DB instance to a standalone DB + instance. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This value + is stored as a lowercase string. + Constraints: + + + + Must be the identifier for an existing read replica DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: mydbinstance + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. 
+ + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + return self._make_request( + action='PromoteReadReplica', + verb='POST', + path='/', params=params) + + def purchase_reserved_db_instances_offering(self, + reserved_db_instances_offering_id, + reserved_db_instance_id=None, + db_instance_count=None, + tags=None): + """ + Purchases a reserved DB instance offering. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The ID of the Reserved DB + instance offering to purchase. + Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: Customer-specified identifier to track + this reservation. + Example: myreservationID + + :type db_instance_count: integer + :param db_instance_count: The number of instances to reserve. + Default: `1` + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id, + } + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if db_instance_count is not None: + params['DBInstanceCount'] = db_instance_count + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='PurchaseReservedDBInstancesOffering', + verb='POST', + path='/', params=params) + + def reboot_db_instance(self, db_instance_identifier, force_failover=None): + """ + Rebooting a DB instance restarts the database engine service. 
+ A reboot also applies to the DB instance any modifications to + the associated DB parameter group that were pending. Rebooting + a DB instance results in a momentary outage of the instance, + during which the DB instance status is set to rebooting. If + the RDS instance is configured for MultiAZ, it is possible + that the reboot will be conducted through a failover. An + Amazon RDS event is created when the reboot is completed. + + If your DB instance is deployed in multiple Availability + Zones, you can force a failover from one AZ to the other + during the reboot. You might force a failover to test the + availability of your DB instance deployment or to restore + operations to the original AZ after a failover occurs. + + The time required to reboot is a function of the specific + database engine's crash recovery process. To improve the + reboot time, we recommend that you reduce database activities + as much as possible during the reboot process to reduce + rollback activity for in-transit transactions. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This parameter is stored as a lowercase + string. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type force_failover: boolean + :param force_failover: When `True`, the reboot will be conducted + through a MultiAZ failover. + Constraint: You cannot specify `True` if the instance is not configured + for MultiAZ. 
+ + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if force_failover is not None: + params['ForceFailover'] = str( + force_failover).lower() + return self._make_request( + action='RebootDBInstance', + verb='POST', + path='/', params=params) + + def remove_source_identifier_from_subscription(self, subscription_name, + source_identifier): + """ + Removes a source identifier from an existing RDS event + notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to remove a source identifier from. + + :type source_identifier: string + :param source_identifier: The source identifier to be removed from the + subscription, such as the **DB instance identifier** for a DB + instance or the name of a security group. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='RemoveSourceIdentifierFromSubscription', + verb='POST', + path='/', params=params) + + def remove_tags_from_resource(self, resource_name, tag_keys): + """ + Removes metadata tags from an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be removed + from. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tag_keys: list + :param tag_keys: The tag key (name) of the tag to be removed. 
+ + """ + params = {'ResourceName': resource_name, } + self.build_list_params(params, + tag_keys, + 'TagKeys.member') + return self._make_request( + action='RemoveTagsFromResource', + verb='POST', + path='/', params=params) + + def reset_db_parameter_group(self, db_parameter_group_name, + reset_all_parameters=None, parameters=None): + """ + Modifies the parameters of a DB parameter group to the + engine/system default value. To reset specific parameters + submit a list of the following: `ParameterName` and + `ApplyMethod`. To reset the entire DB parameter group, specify + the `DBParameterGroup` name and `ResetAllParameters` + parameters. When resetting the entire group, dynamic + parameters are updated immediately and static parameters are + set to `pending-reboot` to take effect on the next DB instance + restart or `RebootDBInstance` request. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type reset_all_parameters: boolean + :param reset_all_parameters: Specifies whether ( `True`) or not ( + `False`) to reset all parameters in the DB parameter group to + default values. + Default: `True` + + :type parameters: list + :param parameters: An array of parameter names, values, and the apply + method for the parameter update. At least one parameter name, + value, and apply method must be supplied; subsequent arguments are + optional. A maximum of 20 parameters may be modified in a single + request. + **MySQL** + + Valid Values (for Apply method): `immediate` | `pending-reboot` + + You can use the immediate value with dynamic parameters only. You can + use the `pending-reboot` value for both dynamic and static + parameters, and changes are applied when DB instance reboots. 
+ + **Oracle** + + Valid Values (for Apply method): `pending-reboot` + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) + return self._make_request( + action='ResetDBParameterGroup', + verb='POST', + path='/', params=params) + + def restore_db_instance_from_db_snapshot(self, db_instance_identifier, + db_snapshot_identifier, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + tags=None): + """ + Creates a new DB instance from a DB snapshot. The target + database is created from the source database restore point + with the same configuration as the original source database, + except that the new RDS instance is created with the default + security group. + + :type db_instance_identifier: string + :param db_instance_identifier: + The identifier for the DB snapshot to restore from. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: Name of the DB instance to create from + the DB snapshot. This parameter isn't case sensitive. 
+ Constraints: + + + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type port: integer + :param port: The port number on which the database accepts connections. + Default: The same port as the original DB instance + + Constraints: Value must be `1150-65535` + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. 
+ + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter doesn't apply to the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: Specifies the amount of provisioned IOPS for the DB + instance, expressed in I/O operations per second. If this parameter + is not specified, the IOPS value will be taken from the backup. If + this parameter is set to 0, the new instance will be converted to a + non-PIOPS instance, which will take additional time, though your DB + instance will be available for connections before the conversion + starts. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. 
+ Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'DBSnapshotIdentifier': db_snapshot_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceFromDBSnapshot', + verb='POST', + path='/', params=params) + + def restore_db_instance_to_point_in_time(self, + source_db_instance_identifier, + target_db_instance_identifier, + restore_time=None, + use_latest_restorable_time=None, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + 
tags=None): + """ + Restores a DB instance to an arbitrary point-in-time. Users + can restore to any point in time before the + latestRestorableTime for up to backupRetentionPeriod days. The + target database is created from the source database with the + same configuration as the original database except that the DB + instance is created with the default DB security group. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: + The identifier of the source DB instance from which to restore. + + Constraints: + + + + Must be the identifier of an existing database instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type target_db_instance_identifier: string + :param target_db_instance_identifier: + The name of the new database instance to be created. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type restore_time: timestamp + :param restore_time: The date and time to restore from. + Valid Values: Value must be a UTC time + + Constraints: + + + + Must be before the latest restorable time for the DB instance + + Cannot be specified if UseLatestRestorableTime parameter is true + + + Example: `2009-09-07T23:45:00Z` + + :type use_latest_restorable_time: boolean + :param use_latest_restorable_time: Specifies whether ( `True`) or not ( + `False`) the DB instance is restored from the latest backup time. + Default: `False` + + Constraints: Cannot be specified if RestoreTime parameter is provided. + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. 
+ Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + Default: The same DBInstanceClass as the original DB instance. + + :type port: integer + :param port: The port number on which the database accepts connections. + Constraints: Value must be `1150-65535` + + Default: The same port as the original DB instance. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to true. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. 
+ + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter is not used for the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + 'TargetDBInstanceIdentifier': target_db_instance_identifier, + } + if restore_time is not None: + params['RestoreTime'] = restore_time + if use_latest_restorable_time is not None: + params['UseLatestRestorableTime'] = str( + use_latest_restorable_time).lower() + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceToPointInTime', + verb='POST', + path='/', params=params) + + def revoke_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Revokes ingress from a DBSecurityGroup for previously + authorized IP ranges or EC2 or VPC Security Groups. Required + parameters for this API are one of CIDRIP, EC2SecurityGroupId + for VPC, or (EC2SecurityGroupOwnerId and either + EC2SecurityGroupName or EC2SecurityGroupId). 
+ + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + revoke ingress from. + + :type cidrip: string + :param cidrip: The IP range to revoke access from. Must be a valid CIDR + range. If `CIDRIP` is specified, `EC2SecurityGroupName`, + `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be + provided. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: The id of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS Account Number of the owner + of the EC2 security group specified in the `EC2SecurityGroupName` + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. 
+ + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff -Nru python-boto-2.20.1/boto/redshift/__init__.py python-boto-2.29.1/boto/redshift/__init__.py --- python-boto-2.20.1/boto/redshift/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/redshift/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,27 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.redshift.layer1 import RedshiftConnection - cls = RedshiftConnection - return [ - RegionInfo(name='us-east-1', - endpoint='redshift.us-east-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-2', - endpoint='redshift.us-west-2.amazonaws.com', - connection_cls=cls), - RegionInfo(name='eu-west-1', - endpoint='redshift.eu-west-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-northeast-1', - endpoint='redshift.ap-northeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-1', - endpoint='redshift.ap-southeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-2', - endpoint='redshift.ap-southeast-2.amazonaws.com', - connection_cls=cls), - ] + return get_regions('redshift', connection_cls=RedshiftConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/redshift/layer1.py python-boto-2.29.1/boto/redshift/layer1.py --- python-boto-2.20.1/boto/redshift/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/redshift/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -142,7 +142,7 @@ self.DefaultRegionEndpoint) if 'host' not in kwargs: kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(RedshiftConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): diff -Nru python-boto-2.20.1/boto/regioninfo.py python-boto-2.29.1/boto/regioninfo.py --- python-boto-2.20.1/boto/regioninfo.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,6 +20,131 @@ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
+from __future__ import with_statement +import os + +import boto +from boto.compat import json +from boto.exception import BotoClientError + + +def load_endpoint_json(path): + """ + Loads a given JSON file & returns it. + + :param path: The path to the JSON file + :type path: string + + :returns: The loaded data + """ + with open(path, 'r') as endpoints_file: + return json.load(endpoints_file) + + +def merge_endpoints(defaults, additions): + """ + Given an existing set of endpoint data, this will deep-update it with + any similarly structured data in the additions. + + :param defaults: The existing endpoints data + :type defaults: dict + + :param defaults: The additional endpoints data + :type defaults: dict + + :returns: The modified endpoints data + :rtype: dict + """ + # We can't just do an ``defaults.update(...)`` here, as that could + # *overwrite* regions if present in both. + # We'll iterate instead, essentially doing a deeper merge. + for service, region_info in additions.items(): + # Set the default, if not present, to an empty dict. + defaults.setdefault(service, {}) + defaults[service].update(region_info) + + return defaults + + +def load_regions(): + """ + Actually load the region/endpoint information from the JSON files. + + By default, this loads from the default included ``boto/endpoints.json`` + file. + + Users can override/extend this by supplying either a ``BOTO_ENDPOINTS`` + environment variable or a ``endpoints_path`` config variable, either of + which should be an absolute path to the user's JSON file. + + :returns: The endpoints data + :rtype: dict + """ + # Load the defaults first. + endpoints = load_endpoint_json(boto.ENDPOINTS_PATH) + additional_path = None + + # Try the ENV var. If not, check the config file. 
+ if os.environ.get('BOTO_ENDPOINTS'): + additional_path = os.environ['BOTO_ENDPOINTS'] + elif boto.config.get('Boto', 'endpoints_path'): + additional_path = boto.config.get('Boto', 'endpoints_path') + + # If there's a file provided, we'll load it & additively merge it into + # the endpoints. + if additional_path: + additional = load_endpoint_json(additional_path) + endpoints = merge_endpoints(endpoints, additional) + + return endpoints + + +def get_regions(service_name, region_cls=None, connection_cls=None): + """ + Given a service name (like ``ec2``), returns a list of ``RegionInfo`` + objects for that service. + + This leverages the ``endpoints.json`` file (+ optional user overrides) to + configure/construct all the objects. + + :param service_name: The name of the service to construct the ``RegionInfo`` + objects for. Ex: ``ec2``, ``s3``, ``sns``, etc. + :type service_name: string + + :param region_cls: (Optional) The class to use when constructing. By + default, this is ``RegionInfo``. + :type region_cls: class + + :param connection_cls: (Optional) The connection class for the + ``RegionInfo`` object. Providing this allows the ``connect`` method on + the ``RegionInfo`` to work. Default is ``None`` (no connection). + :type connection_cls: class + + :returns: A list of configured ``RegionInfo`` objects + :rtype: list + """ + endpoints = load_regions() + + if not service_name in endpoints: + raise BotoClientError( + "Service '%s' not found in endpoints." 
% service_name + ) + + if region_cls is None: + region_cls = RegionInfo + + region_objs = [] + + for region_name, endpoint in endpoints.get(service_name, {}).items(): + region_objs.append( + region_cls( + name=region_name, + endpoint=endpoint, + connection_cls=connection_cls + ) + ) + + return region_objs class RegionInfo(object): diff -Nru python-boto-2.20.1/boto/requestlog.py python-boto-2.29.1/boto/requestlog.py --- python-boto-2.20.1/boto/requestlog.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/requestlog.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,39 @@ + +from datetime import datetime +from threading import Thread +import Queue + +from boto.utils import RequestHook + +class RequestLogger(RequestHook): + """ + This class implements a request logger that uses a single thread to + write to a log file. + """ + def __init__(self, filename='/tmp/request_log.csv'): + self.request_log_file = open(filename, 'w') + self.request_log_queue = Queue.Queue(100) + Thread(target=self._request_log_worker).start() + + + def handle_request_data(self, request, response, error=False): + len = 0 if error else response.getheader('Content-Length') + now = datetime.now() + time = now.strftime('%Y-%m-%d %H:%M:%S') + td = (now - request.start_time) + duration = (td.microseconds + long(td.seconds + td.days*24*3600) * 1e6) / 1e6 + + # write output including timestamp, status code, response time, response size, request action + self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (time, response.status, duration, len, request.params['Action'])) + + + def _request_log_worker(self): + while True: + try: + item = self.request_log_queue.get(True) + self.request_log_file.write(item) + self.request_log_file.flush() + self.request_log_queue.task_done() + except: + import traceback; traceback.print_exc(file=sys.stdout) + diff -Nru python-boto-2.20.1/boto/resultset.py python-boto-2.29.1/boto/resultset.py --- python-boto-2.20.1/boto/resultset.py 2013-12-13 
20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/resultset.py 2014-05-30 20:49:34.000000000 +0000 @@ -117,6 +117,11 @@ self.append(value) elif name == 'NextToken': self.next_token = value + elif name == 'nextToken': + self.next_token = value + # Code exists which expects nextToken to be available, so we + # set it here to remain backwards-compatible. + self.nextToken = value elif name == 'BoxUsage': try: connection.box_usage += float(value) diff -Nru python-boto-2.20.1/boto/roboto/awsqueryrequest.py python-boto-2.29.1/boto/roboto/awsqueryrequest.py --- python-boto-2.20.1/boto/roboto/awsqueryrequest.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/roboto/awsqueryrequest.py 2014-05-30 20:49:34.000000000 +0000 @@ -77,22 +77,22 @@ def __init__(self, required): self.required = required s = 'Required parameters are missing: %s' % self.required - boto.exception.BotoClientError.__init__(self, s) + super(RequiredParamError, self).__init__(s) class EncoderError(boto.exception.BotoClientError): def __init__(self, error_msg): s = 'Error encoding value (%s)' % error_msg - boto.exception.BotoClientError.__init__(self, s) - + super(EncoderError, self).__init__(s) + class FilterError(boto.exception.BotoClientError): def __init__(self, filters): self.filters = filters s = 'Unknown filters: %s' % self.filters - boto.exception.BotoClientError.__init__(self, s) - -class Encoder: + super(FilterError, self).__init__(s) + +class Encoder(object): @classmethod def encode(cls, p, rp, v, label=None): @@ -103,7 +103,7 @@ mthd(p, rp, v, label) except AttributeError: raise EncoderError('Unknown type: %s' % p.ptype) - + @classmethod def encode_string(cls, p, rp, v, l): if l: @@ -122,7 +122,7 @@ else: label = p.name rp[label] = '%d' % v - + @classmethod def encode_boolean(cls, p, rp, v, l): if l: @@ -134,7 +134,7 @@ else: v = 'false' rp[label] = v - + @classmethod def encode_datetime(cls, p, rp, v, l): if l: @@ -142,7 +142,7 @@ else: label = p.name rp[label] = v - + 
@classmethod def encode_array(cls, p, rp, v, l): v = boto.utils.mklist(v) @@ -153,7 +153,7 @@ label = label + '.%d' for i, value in enumerate(v): rp[label%(i+1)] = value - + class AWSQueryRequest(object): ServiceClass = None @@ -290,7 +290,7 @@ elif fmt and fmt['type'] == 'array': self.list_markers.append(prev_name) self.item_markers.append(fmt['name']) - + def send(self, verb='GET', **args): self.process_args(**args) self.process_filters() @@ -371,7 +371,7 @@ if a.doc: s += '\n\n\t%s - %s' % (a.long_name, a.doc) return s - + def build_cli_parser(self): self.parser = optparse.OptionParser(description=self.Description, usage=self.get_usage()) diff -Nru python-boto-2.20.1/boto/roboto/awsqueryservice.py python-boto-2.29.1/boto/roboto/awsqueryservice.py --- python-boto-2.20.1/boto/roboto/awsqueryservice.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/roboto/awsqueryservice.py 2014-05-30 20:49:34.000000000 +0000 @@ -10,7 +10,7 @@ def __init__(self): s = 'Unable to find credentials' - boto.exception.BotoClientError.__init__(self, s) + super(NoCredentialsError, self).__init__(s) class AWSQueryService(boto.connection.AWSQueryConnection): @@ -41,7 +41,7 @@ if 'port' not in self.args: self.args['port'] = self.Port try: - boto.connection.AWSQueryConnection.__init__(self, **self.args) + super(AWSQueryService, self).__init__(**self.args) self.aws_response = None except boto.exception.NoAuthHandlerFound: raise NoCredentialsError() @@ -115,7 +115,7 @@ if rslt.path and 'path' not in self.args: self.args['path'] = rslt.path - + def _required_auth_capability(self): return [self.Authentication] - + diff -Nru python-boto-2.20.1/boto/roboto/param.py python-boto-2.29.1/boto/roboto/param.py --- python-boto-2.20.1/boto/roboto/param.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/roboto/param.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,7 +23,7 @@ import os class Converter(object): - + @classmethod def convert_string(cls, param, value): # TODO: could 
do length validation, etc. here @@ -35,7 +35,7 @@ def convert_integer(cls, param, value): # TODO: could do range checking here return int(value) - + @classmethod def convert_boolean(cls, param, value): """ @@ -43,19 +43,19 @@ of the option means True so just return True """ return True - + @classmethod def convert_file(cls, param, value): - if os.path.isfile(value): + if os.path.exists(value) and not os.path.isdir(value): return value raise ValueError - + @classmethod def convert_dir(cls, param, value): if os.path.isdir(value): return value raise ValueError - + @classmethod def convert(cls, param, value): try: @@ -66,8 +66,8 @@ return mthd(param, value) except: raise ValidationException(param, '') - -class Param(object): + +class Param(Converter): def __init__(self, name=None, ptype='string', optional=True, short_name=None, long_name=None, doc='', @@ -142,6 +142,6 @@ :param value: The value to convert. This should always be a string. """ - return Converter.convert(self, value) + return super(Param, self).convert(self,value) diff -Nru python-boto-2.20.1/boto/route53/connection.py python-boto-2.29.1/boto/route53/connection.py --- python-boto-2.20.1/boto/route53/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/route53/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -54,22 +54,24 @@ DefaultHost = 'route53.amazonaws.com' """The default Route53 API endpoint to connect to.""" - Version = '2012-02-29' + Version = '2013-04-01' """Route53 API version.""" - XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/' + XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/' """XML schema for this Route53 API version.""" def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, port=None, proxy=None, proxy_port=None, host=DefaultHost, debug=0, security_token=None, - validate_certs=True, https_connection_factory=None): - AWSAuthConnection.__init__(self, host, + validate_certs=True, https_connection_factory=None, + 
profile_name=None): + super(Route53Connection, self).__init__(host, aws_access_key_id, aws_secret_access_key, True, port, proxy, proxy_port, debug=debug, security_token=security_token, validate_certs=validate_certs, - https_connection_factory=https_connection_factory) + https_connection_factory=https_connection_factory, + profile_name=profile_name) def _required_auth_capability(self): return ['route53'] @@ -82,7 +84,7 @@ continue pairs.append(key + '=' + urllib.quote(str(val))) path += '?' + '&'.join(pairs) - return AWSAuthConnection.make_request(self, action, path, + return super(Route53Connection, self).make_request(action, path, headers, data, retry_handler=self._retry_handler) @@ -224,6 +226,101 @@ h.parse(body) return e + + # Health checks + + POSTHCXMLBody = """ + %(caller_ref)s + %(health_check)s + """ + + def create_health_check(self, health_check, caller_ref=None): + """ + Create a new Health Check + + :type health_check: HealthCheck + :param health_check: HealthCheck object + + :type caller_ref: str + :param caller_ref: A unique string that identifies the request + and that allows failed CreateHealthCheckRequest requests to be retried + without the risk of executing the operation twice. If you don't + provide a value for this, boto will generate a Type 4 UUID and + use that. 
+ + """ + if caller_ref is None: + caller_ref = str(uuid.uuid4()) + uri = '/%s/healthcheck' % self.Version + params = {'xmlns': self.XMLNameSpace, + 'caller_ref': caller_ref, + 'health_check': health_check.to_xml() + } + xml_body = self.POSTHCXMLBody % params + response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) + body = response.read() + boto.log.debug(body) + if response.status == 201: + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + raise exception.DNSServerError(response.status, response.reason, body) + + def get_list_health_checks(self, maxitems=None, marker=None): + """ + Return a list of health checks + + :type maxitems: int + :param maxitems: Maximum number of items to return + + :type marker: str + :param marker: marker to get next set of items to list + + """ + + params = {} + if maxitems is not None: + params['maxitems'] = maxitems + if marker is not None: + params['marker'] = marker + + uri = '/%s/healthcheck' % (self.Version, ) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='HealthChecks', item_marker=('HealthCheck',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def delete_health_check(self, health_check_id): + """ + Delete a health check + + :type health_check_id: str + :param health_check_id: ID of the health check to delete + + """ + uri = '/%s/healthcheck/%s' % (self.Version, health_check_id) + response = self.make_request('DELETE', uri) + body = response.read() + boto.log.debug(body) + if response.status not in (200, 204): + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + # Resource 
Record Sets def get_all_rrsets(self, hosted_zone_id, type=None, diff -Nru python-boto-2.20.1/boto/route53/healthcheck.py python-boto-2.29.1/boto/route53/healthcheck.py --- python-boto-2.20.1/boto/route53/healthcheck.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/route53/healthcheck.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,141 @@ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + + +""" +From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html + +POST /2013-04-01/healthcheck HTTP/1.1 + + + + unique description + + IP address of the endpoint to check + port on the endpoint to check + HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP + path of the file that + you want Amazon Route 53 to request + domain name of the + endpoint to check + if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, + the string to search for in the response body + from the specified resource + 10 | 30 + integer between 1 and 10 + + +""" + + +class HealthCheck(object): + + """An individual health check""" + + POSTXMLBody = """ + + %(ip_addr)s + %(port)s + %(type)s + %(resource_path)s + %(fqdn_part)s + %(string_match_part)s + %(request_interval)s + %(failure_threshold)s + + """ + + XMLFQDNPart = """%(fqdn)s""" + + XMLStringMatchPart = """%(string_match)s""" + + XMLRequestIntervalPart = """%(request_interval)d""" + + valid_request_intervals = (10, 30) + + def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3): + """ + HealthCheck object + + :type ip_addr: str + :param ip_addr: IP Address + + :type port: int + :param port: Port to check + + :type hc_type: str + :param ip_addr: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP + + :type resource_path: str + :param resource_path: Path to check + + :type fqdn: str + :param fqdn: domain name of the endpoint to check + + :type string_match: str + :param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource + + :type request_interval: int + :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request. 
+ + :type failure_threshold: int + :param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. + + """ + self.ip_addr = ip_addr + self.port = port + self.hc_type = hc_type + self.resource_path = resource_path + self.fqdn = fqdn + self.string_match = string_match + self.failure_threshold = failure_threshold + + if request_interval in self.valid_request_intervals: + self.request_interval = request_interval + else: + raise AttributeError( + "Valid values for request_interval are: %s" % + ",".join(str(i) for i in self.valid_request_intervals)) + + if failure_threshold < 1 or failure_threshold > 10: + raise AttributeError( + 'Valid values for failure_threshold are 1 - 10.') + + def to_xml(self): + params = { + 'ip_addr': self.ip_addr, + 'port': self.port, + 'type': self.hc_type, + 'resource_path': self.resource_path, + 'fqdn_part': "", + 'string_match_part': "", + 'request_interval': (self.XMLRequestIntervalPart % + {'request_interval': self.request_interval}), + 'failure_threshold': self.failure_threshold, + } + if self.fqdn is not None: + params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn} + + if self.string_match is not None: + params['string_match_part'] = self.XMLStringMatchPart % {'string_match' : self.string_match} + + return self.POSTXMLBody % params diff -Nru python-boto-2.20.1/boto/route53/__init__.py python-boto-2.29.1/boto/route53/__init__.py --- python-boto-2.20.1/boto/route53/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/route53/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,8 +23,8 @@ # this is here for backward compatibility # originally, the Route53Connection class was defined here -from connection import Route53Connection -from boto.regioninfo import RegionInfo +from boto.route53.connection import Route53Connection +from boto.regioninfo import RegionInfo, 
get_regions class Route53RegionInfo(RegionInfo): @@ -51,10 +51,22 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [Route53RegionInfo(name='universal', - endpoint='route53.amazonaws.com', - connection_cls=Route53Connection) - ] + regions = get_regions( + 'route53', + region_cls=Route53RegionInfo, + connection_cls=Route53Connection + ) + + # For historical reasons, we had a "universal" endpoint as well. + regions.append( + Route53RegionInfo( + name='universal', + endpoint='route53.amazonaws.com', + connection_cls=Route53Connection + ) + ) + + return regions def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/route53/record.py python-boto-2.29.1/boto/route53/record.py --- python-boto-2.20.1/boto/route53/record.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/route53/record.py 2014-05-30 20:49:34.000000000 +0000 @@ -35,7 +35,7 @@ """ ChangeResourceRecordSetsBody = """ - + %(comment)s %(changes)s @@ -54,7 +54,7 @@ self.changes = [] self.next_record_name = None self.next_record_type = None - ResultSet.__init__(self, [('ResourceRecordSet', Record)]) + super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)]) def __repr__(self): if self.changes: @@ -66,12 +66,13 @@ def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, - weight=None, region=None): + weight=None, region=None, alias_evaluate_target_health=None, + health_check=None, failover=None): """ Add a change request to the set. :type action: str - :param action: The action to perform ('CREATE'|'DELETE') + :param action: The action to perform ('CREATE'|'DELETE'|'UPSERT') :type name: str :param name: The name of the domain you want to perform the action on. 
@@ -118,11 +119,26 @@ record sets that have the same combination of DNS name and type, a value that determines which region this should be associated with for the latency-based routing + + :type alias_evaluate_target_health: Boolean + :param alias_evaluate_target_health: *Required for alias resource record sets* Indicates + whether this Resource Record Set should respect the health status of + any health checks associated with the ALIAS target record which it is + linked to. + + :type health_check: str + :param health_check: Health check to associate with this record + + :type failover: str + :param failover: *Failover resource record sets only* Whether this is the + primary or secondary resource record set. """ change = Record(name, type, ttl, alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name, identifier=identifier, - weight=weight, region=region) + weight=weight, region=region, + alias_evaluate_target_health=alias_evaluate_target_health, + health_check=health_check, failover=failover) self.changes.append([action, change]) return change @@ -156,11 +172,11 @@ elif name == 'NextRecordType': self.next_record_type = value else: - return ResultSet.endElement(self, name, value, connection) + return super(ResourceRecordSets, self).endElement(name, value, connection) def __iter__(self): """Override the next function to support paging""" - results = ResultSet.__iter__(self) + results = super(ResourceRecordSets, self).__iter__() truncated = self.is_truncated while results: for obj in results: @@ -178,11 +194,14 @@ class Record(object): """An individual ResourceRecordSet""" + HealthCheckBody = """%s""" + XMLBody = """ %(name)s %(type)s %(weight)s %(body)s + %(health_check)s """ WRRBody = """ @@ -194,6 +213,11 @@ %(identifier)s %(region)s """ + + FailoverBody = """ + %(identifier)s + %(failover)s + """ ResourceRecordsBody = """ %(ttl)s @@ -206,19 +230,22 @@ """ AliasBody = """ - %s - %s + %(hosted_zone_id)s + %(dns_name)s + %(eval_target_health)s """ + 
EvaluateTargetHealth = """%s""" def __init__(self, name=None, type=None, ttl=600, resource_records=None, alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, - weight=None, region=None): + weight=None, region=None, alias_evaluate_target_health=None, + health_check=None, failover=None): self.name = name self.type = type self.ttl = ttl - if resource_records == None: + if resource_records is None: resource_records = [] self.resource_records = resource_records self.alias_hosted_zone_id = alias_hosted_zone_id @@ -226,6 +253,9 @@ self.identifier = identifier self.weight = weight self.region = region + self.alias_evaluate_target_health = alias_evaluate_target_health + self.health_check = health_check + self.failover = failover def __repr__(self): return '' % (self.name, self.type, self.to_print()) @@ -234,16 +264,25 @@ """Add a resource record value""" self.resource_records.append(value) - def set_alias(self, alias_hosted_zone_id, alias_dns_name): + def set_alias(self, alias_hosted_zone_id, alias_dns_name, + alias_evaluate_target_health=False): """Make this an alias resource record set""" self.alias_hosted_zone_id = alias_hosted_zone_id self.alias_dns_name = alias_dns_name + self.alias_evaluate_target_health = alias_evaluate_target_health def to_xml(self): """Spit this resource record set out as XML""" - if self.alias_hosted_zone_id != None and self.alias_dns_name != None: + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: # Use alias - body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name) + if self.alias_evaluate_target_health is not None: + eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false') + else: + eval_target_health = "" + + body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id, + "dns_name": self.alias_dns_name, + "eval_target_health": eval_target_health } else: # Use resource record(s) records = "" @@ -258,34 +297,46 @@ weight = "" - 
if self.identifier != None and self.weight != None: + if self.identifier is not None and self.weight is not None: weight = self.WRRBody % {"identifier": self.identifier, "weight": self.weight} - elif self.identifier != None and self.region != None: + elif self.identifier is not None and self.region is not None: weight = self.RRRBody % {"identifier": self.identifier, "region": self.region} + elif self.identifier is not None and self.failover is not None: + weight = self.FailoverBody % {"identifier": self.identifier, "failover": + self.failover} + + health_check = "" + if self.health_check is not None: + health_check = self.HealthCheckBody % (self.health_check) params = { "name": self.name, "type": self.type, "weight": weight, "body": body, + "health_check": health_check } return self.XMLBody % params def to_print(self): rr = "" - if self.alias_hosted_zone_id != None and self.alias_dns_name != None: + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: # Show alias rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name + if self.alias_evaluate_target_health is not None: + rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health else: # Show resource record(s) rr = ",".join(self.resource_records) - if self.identifier != None and self.weight != None: + if self.identifier is not None and self.weight is not None: rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight) - elif self.identifier != None and self.region != None: + elif self.identifier is not None and self.region is not None: rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region) + elif self.identifier is not None and self.failover is not None: + rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover) return rr @@ -304,10 +355,14 @@ self.alias_dns_name = value elif name == 'SetIdentifier': self.identifier = value + elif name == 'EvaluateTargetHealth': + self.alias_evaluate_target_health = value.lower() == 'true' elif name == 'Weight': 
self.weight = value elif name == 'Region': self.region = value + elif name == 'Failover': + self.failover = value def startElement(self, name, attrs, connection): return None diff -Nru python-boto-2.20.1/boto/route53/zone.py python-boto-2.29.1/boto/route53/zone.py --- python-boto-2.20.1/boto/route53/zone.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/route53/zone.py 2014-05-30 20:49:34.000000000 +0000 @@ -34,8 +34,8 @@ """ A Route53 Zone. - :ivar Route53Connection route53connection - :ivar str Id: The ID of the hosted zone. + :ivar route53connection: A :class:`boto.route53.connection.Route53Connection` connection + :ivar id: The ID of the hosted zone """ def __init__(self, route53connection, zone_dict): self.route53connection = route53connection diff -Nru python-boto-2.20.1/boto/s3/acl.py python-boto-2.29.1/boto/s3/acl.py --- python-boto-2.20.1/boto/s3/acl.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/acl.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -28,7 +28,7 @@ 'log-delivery-write'] -class Policy: +class Policy(object): def __init__(self, parent=None): self.parent = parent @@ -74,7 +74,7 @@ s += '' return s -class ACL: +class ACL(object): def __init__(self, policy=None): self.policy = policy @@ -111,8 +111,8 @@ s += grant.to_xml() s += '' return s - -class Grant: + +class Grant(object): NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' @@ -160,5 +160,5 @@ s += '%s' % self.permission s += '' return s - - + + diff -Nru python-boto-2.20.1/boto/s3/bucketlistresultset.py python-boto-2.29.1/boto/s3/bucketlistresultset.py --- python-boto-2.20.1/boto/s3/bucketlistresultset.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/bucketlistresultset.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,12 +14,13 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None): +def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): """ A generator function for listing keys in a bucket. """ @@ -27,14 +28,15 @@ k = None while more_results: rs = bucket.get_all_keys(prefix=prefix, marker=marker, - delimiter=delimiter, headers=headers) + delimiter=delimiter, headers=headers, + encoding_type=encoding_type) for k in rs: yield k if k: marker = rs.next_marker or k.name more_results= rs.is_truncated - -class BucketListResultSet: + +class BucketListResultSet(object): """ A resultset for listing keys within a bucket. 
Uses the bucket_lister generator function and implements the iterator interface. This @@ -43,20 +45,24 @@ keys in a reasonably efficient manner. """ - def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None): + def __init__(self, bucket=None, prefix='', delimiter='', marker='', + headers=None, encoding_type=None): self.bucket = bucket self.prefix = prefix self.delimiter = delimiter self.marker = marker self.headers = headers + self.encoding_type = encoding_type def __iter__(self): return bucket_lister(self.bucket, prefix=self.prefix, delimiter=self.delimiter, marker=self.marker, - headers=self.headers) + headers=self.headers, + encoding_type=self.encoding_type) def versioned_bucket_lister(bucket, prefix='', delimiter='', - key_marker='', version_id_marker='', headers=None): + key_marker='', version_id_marker='', headers=None, + encoding_type=None): """ A generator function for listing versions in a bucket. """ @@ -66,14 +72,14 @@ rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker, version_id_marker=version_id_marker, delimiter=delimiter, headers=headers, - max_keys=999) + max_keys=999, encoding_type=encoding_type) for k in rs: yield k key_marker = rs.next_key_marker version_id_marker = rs.next_version_id_marker more_results= rs.is_truncated - -class VersionedBucketListResultSet: + +class VersionedBucketListResultSet(object): """ A resultset for listing versions within a bucket. Uses the bucket_lister generator function and implements the iterator interface. 
This @@ -83,24 +89,26 @@ """ def __init__(self, bucket=None, prefix='', delimiter='', key_marker='', - version_id_marker='', headers=None): + version_id_marker='', headers=None, encoding_type=None): self.bucket = bucket self.prefix = prefix self.delimiter = delimiter self.key_marker = key_marker self.version_id_marker = version_id_marker self.headers = headers + self.encoding_type = encoding_type def __iter__(self): return versioned_bucket_lister(self.bucket, prefix=self.prefix, delimiter=self.delimiter, key_marker=self.key_marker, version_id_marker=self.version_id_marker, - headers=self.headers) + headers=self.headers, + encoding_type=self.encoding_type) def multipart_upload_lister(bucket, key_marker='', upload_id_marker='', - headers=None): + headers=None, encoding_type=None): """ A generator function for listing multipart uploads in a bucket. """ @@ -109,14 +117,15 @@ while more_results: rs = bucket.get_all_multipart_uploads(key_marker=key_marker, upload_id_marker=upload_id_marker, - headers=headers) + headers=headers, + encoding_type=encoding_type) for k in rs: yield k key_marker = rs.next_key_marker upload_id_marker = rs.next_upload_id_marker more_results= rs.is_truncated - -class MultiPartUploadListResultSet: + +class MultiPartUploadListResultSet(object): """ A resultset for listing multipart uploads within a bucket. Uses the multipart_upload_lister generator function and @@ -126,14 +135,16 @@ keys in a reasonably efficient manner. 
""" def __init__(self, bucket=None, key_marker='', - upload_id_marker='', headers=None): + upload_id_marker='', headers=None, encoding_type=None): self.bucket = bucket self.key_marker = key_marker self.upload_id_marker = upload_id_marker self.headers = headers + self.encoding_type = encoding_type def __iter__(self): return multipart_upload_lister(self.bucket, key_marker=self.key_marker, upload_id_marker=self.upload_id_marker, - headers=self.headers) + headers=self.headers, + encoding_type=self.encoding_type) diff -Nru python-boto-2.20.1/boto/s3/bucketlogging.py python-boto-2.29.1/boto/s3/bucketlogging.py --- python-boto-2.20.1/boto/s3/bucketlogging.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/bucketlogging.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -22,7 +22,7 @@ import xml.sax.saxutils from acl import Grant -class BucketLogging: +class BucketLogging(object): def __init__(self, target=None, prefix=None, grants=None): self.target = target @@ -68,7 +68,7 @@ # caller is responsible to encode to utf-8 s = u'' s += u'' - if self.target is not None: + if self.target is not None: s += u'' s += u'%s' % self.target prefix = self.prefix or '' diff -Nru python-boto-2.20.1/boto/s3/bucket.py python-boto-2.29.1/boto/s3/bucket.py --- python-boto-2.20.1/boto/s3/bucket.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/bucket.py 2014-05-30 20:49:34.000000000 +0000 @@ -54,7 +54,7 @@ # as per http://goo.gl/BDuud (02/19/2011) -class S3WebsiteEndpointTranslate: +class S3WebsiteEndpointTranslate(object): trans_region = defaultdict(lambda: 's3-website-us-east-1') trans_region['eu-west-1'] = 's3-website-eu-west-1' @@ -64,6 +64,7 @@ trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1' trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1' trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2' + trans_region['cn-north-1'] = 's3-website.cn-north-1' @classmethod def translate_region(self, reg): @@ -142,24 +143,46 @@ return self.get_key(key_name, headers=headers) def get_key(self, key_name, headers=None, version_id=None, - response_headers=None): + response_headers=None, validate=True): """ Check to see if a particular key exists within the bucket. This method uses a HEAD request to check for the existance of the key. Returns: An instance of a Key object or None - :type key_name: string :param key_name: The name of the key to retrieve + :type key_name: string + + :param headers: The headers to send when retrieving the key + :type headers: dict + + :param version_id: + :type version_id: string - :type response_headers: dict :param response_headers: A dictionary containing HTTP headers/values that will override any headers associated with the stored object in the response. 
See http://goo.gl/EWOPb for details. + :type response_headers: dict + + :param validate: Verifies whether the key exists. If ``False``, this + will not hit the service, constructing an in-memory object. + Default is ``True``. + :type validate: bool :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket. """ + if validate is False: + if headers or version_id or response_headers: + raise BotoClientError( + "When providing 'validate=False', no other params " + \ + "are allowed." + ) + + # This leans on the default behavior of ``new_key`` (not hitting + # the service). If that changes, that behavior should migrate here. + return self.new_key(key_name) + query_args_l = [] if version_id: query_args_l.append('versionId=%s' % version_id) @@ -211,7 +234,8 @@ raise self.connection.provider.storage_response_error( response.status, response.reason, '') - def list(self, prefix='', delimiter='', marker='', headers=None): + def list(self, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result @@ -243,13 +267,26 @@ :type marker: string :param marker: The "marker" of where you are in the result set + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ - return BucketListResultSet(self, prefix, delimiter, marker, headers) + return BucketListResultSet(self, prefix, delimiter, marker, headers, + encoding_type=encoding_type) def list_versions(self, prefix='', delimiter='', key_marker='', - version_id_marker='', headers=None): + version_id_marker='', headers=None, encoding_type=None): """ List version objects within a bucket. This returns an instance of an VersionedBucketListResultSet that automatically @@ -273,34 +310,63 @@ for more details. - :type marker: string - :param marker: The "marker" of where you are in the result set + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return VersionedBucketListResultSet(self, prefix, delimiter, key_marker, version_id_marker, - headers) + headers, + encoding_type=encoding_type) def list_multipart_uploads(self, key_marker='', upload_id_marker='', - headers=None): + headers=None, encoding_type=None): """ List multipart upload objects within a bucket. This returns an instance of an MultiPartUploadListResultSet that automatically handles all of the result paging, etc. from S3. 
You just need to keep iterating until there are no more results. - :type marker: string - :param marker: The "marker" of where you are in the result set + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :type upload_id_marker: string + :param upload_id_marker: The upload identifier + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return MultiPartUploadListResultSet(self, key_marker, upload_id_marker, - headers) + headers, + encoding_type=encoding_type) def _get_all_query_args(self, params, initial_query_string=''): pairs = [] @@ -381,12 +447,25 @@ element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + :rtype: ResultSet :return: The result from S3 listing the keys requested """ self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', - 'marker', 'delimiter']) + 'marker', 'delimiter', + 'encoding_type']) return self._get_all([('Contents', self.key_class), ('CommonPrefixes', Prefix)], '', headers, **params) @@ -421,6 +500,18 @@ element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + :rtype: ResultSet :return: The result from S3 listing the keys requested """ @@ -440,7 +531,7 @@ """ self.validate_kwarg_names( params, ['maxkeys', 'max_keys', 'prefix', 'key_marker', - 'version_id_marker', 'delimiter']) + 'version_id_marker', 'delimiter', 'encoding_type']) def get_all_multipart_uploads(self, headers=None, **params): """ @@ -476,12 +567,42 @@ list only if they have an upload ID lexicographically greater than the specified upload_id_marker. + :type encoding_type: string + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + + :type delimiter: string + :param delimiter: Character you use to group keys. 
+ All keys that contain the same string between the prefix, if + specified, and the first occurrence of the delimiter after the + prefix are grouped under a single result element, CommonPrefixes. + If you don't specify the prefix parameter, then the substring + starts at the beginning of the key. The keys that are grouped + under CommonPrefixes result element are not returned elsewhere + in the response. + + :type prefix: string + :param prefix: Lists in-progress uploads only for those keys that + begin with the specified prefix. You can use prefixes to separate + a bucket into different grouping of keys. (You can think of using + prefix to make groups in the same way you'd use a folder in a + file system.) + :rtype: ResultSet :return: The result from S3 listing the uploads requested """ self.validate_kwarg_names(params, ['max_uploads', 'key_marker', - 'upload_id_marker']) + 'upload_id_marker', 'encoding_type', + 'delimiter', 'prefix']) return self._get_all([('Upload', MultiPartUpload), ('CommonPrefixes', Prefix)], 'uploads', headers, **params) @@ -1549,6 +1670,15 @@ """ Start a multipart upload operation. + .. note:: + + Note: After you initiate multipart upload and upload one or more + parts, you must either complete or abort multipart upload in order + to stop getting charged for storage of the uploaded parts. Only + after you either complete or abort multipart upload, Amazon S3 + frees up the parts storage and stops charging you for the parts + storage. + :type key_name: string :param key_name: The name of the key that will ultimately result from this multipart upload operation. This will be @@ -1649,6 +1779,11 @@ response.status, response.reason, body) def cancel_multipart_upload(self, key_name, upload_id, headers=None): + """ + To verify that all parts have been removed, so you don't get charged + for the part storage, you should call the List Parts operation and + ensure the parts list is empty. 
+ """ query_args = 'uploadId=%s' % upload_id response = self.connection.make_request('DELETE', self.name, key_name, query_args=query_args, diff -Nru python-boto-2.20.1/boto/s3/connection.py python-boto-2.29.1/boto/s3/connection.py --- python-boto-2.20.1/boto/s3/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -27,6 +27,7 @@ import base64 import time +from boto.auth import detect_potential_s3sigv4 import boto.utils from boto.connection import AWSAuthConnection from boto import handler @@ -134,7 +135,7 @@ return url_base -class Location: +class Location(object): DEFAULT = '' # US Classic Region EU = 'EU' @@ -144,6 +145,17 @@ APNortheast = 'ap-northeast-1' APSoutheast = 'ap-southeast-1' APSoutheast2 = 'ap-southeast-2' + CNNorth1 = 'cn-north-1' + + +class NoHostProvided(object): + # An identifying object to help determine whether the user provided a + # ``host`` or not. Never instantiated. + pass + + +class HostRequiredError(BotoClientError): + pass class S3Connection(AWSAuthConnection): @@ -155,24 +167,36 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, - host=DefaultHost, debug=0, https_connection_factory=None, + host=NoHostProvided, debug=0, https_connection_factory=None, calling_format=DefaultCallingFormat, path='/', provider='aws', bucket_class=Bucket, security_token=None, suppress_consec_slashes=True, anon=False, - validate_certs=None): - if isinstance(calling_format, str): + validate_certs=None, profile_name=None): + no_host_provided = False + if host is NoHostProvided: + no_host_provided = True + host = self.DefaultHost + if isinstance(calling_format, basestring): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class self.anon = anon - AWSAuthConnection.__init__(self, host, + super(S3Connection, 
self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, - validate_certs=validate_certs) + validate_certs=validate_certs, profile_name=profile_name) + # We need to delay until after the call to ``super`` before checking + # to see if SigV4 is in use. + if no_host_provided: + if 'hmac-v4-s3' in self._required_auth_capability(): + raise HostRequiredError( + "When using SigV4, you must specify a 'host' parameter." + ) + @detect_potential_s3sigv4 def _required_auth_capability(self): if self.anon: return ['anon'] @@ -267,9 +291,9 @@ """ - if fields == None: + if fields is None: fields = [] - if conditions == None: + if conditions is None: conditions = [] expiration = time.gmtime(int(time.time() + expires_in)) @@ -415,6 +439,23 @@ ``S3Connection.lookup`` method, which will either return a valid bucket or ``None``. + If ``validate=False`` is passed, no request is made to the service (no + charge/communication delay). This is only safe to do if you are **sure** + the bucket exists. + + If the default ``validate=True`` is passed, a request is made to the + service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched + a list of keys (but with a max limit set to ``0``, always returning an empty + list) in the bucket (& included better error messages), at an + increased expense. As of Boto v2.25.0, this now performs a HEAD request + (less expensive but worse error messages). + + If you were relying on parsing the error message before, you should call + something like:: + + bucket = conn.get_bucket('', validate=False) + bucket.get_all_keys(maxkeys=0) + :type bucket_name: string :param bucket_name: The name of the bucket @@ -423,13 +464,58 @@ AWS. 
:type validate: boolean - :param validate: If ``True``, it will try to fetch all keys within the - given bucket. (Default: ``True``) + :param validate: If ``True``, it will try to verify the bucket exists + on the service-side. (Default: ``True``) """ - bucket = self.bucket_class(self, bucket_name) if validate: - bucket.get_all_keys(headers, maxkeys=0) - return bucket + return self.head_bucket(bucket_name, headers=headers) + else: + return self.bucket_class(self, bucket_name) + + def head_bucket(self, bucket_name, headers=None): + """ + Determines if a bucket exists by name. + + If the bucket does not exist, an ``S3ResponseError`` will be raised. + + :type bucket_name: string + :param bucket_name: The name of the bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to + AWS. + + :returns: A object + """ + response = self.make_request('HEAD', bucket_name, headers=headers) + body = response.read() + if response.status == 200: + return self.bucket_class(self, bucket_name) + elif response.status == 403: + # For backward-compatibility, we'll populate part of the exception + # with the most-common default. + err = self.provider.storage_response_error( + response.status, + response.reason, + body + ) + err.error_code = 'AccessDenied' + err.error_message = 'Access Denied' + raise err + elif response.status == 404: + # For backward-compatibility, we'll populate part of the exception + # with the most-common default. + err = self.provider.storage_response_error( + response.status, + response.reason, + body + ) + err.error_code = 'NoSuchBucket' + err.error_message = 'The specified bucket does not exist' + raise err + else: + raise self.provider.storage_response_error( + response.status, response.reason, body) def lookup(self, bucket_name, validate=True, headers=None): """ @@ -540,8 +626,8 @@ boto.log.debug('path=%s' % path) auth_path += '?' 
+ query_args boto.log.debug('auth_path=%s' % auth_path) - return AWSAuthConnection.make_request( - self, method, path, headers, + return super(S3Connection, self).make_request( + method, path, headers, data, host, auth_path, sender, override_num_retries=override_num_retries, retry_handler=retry_handler diff -Nru python-boto-2.20.1/boto/s3/deletemarker.py python-boto-2.29.1/boto/s3/deletemarker.py --- python-boto-2.20.1/boto/s3/deletemarker.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/deletemarker.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,14 +14,14 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.s3.user import User -class DeleteMarker: +class DeleteMarker(object): def __init__(self, bucket=None, name=None): self.bucket = bucket self.name = name diff -Nru python-boto-2.20.1/boto/s3/__init__.py python-boto-2.29.1/boto/s3/__init__.py --- python-boto-2.20.1/boto/s3/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -22,7 +22,7 @@ # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions class S3RegionInfo(RegionInfo): @@ -50,34 +50,11 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from .connection import S3Connection - return [S3RegionInfo(name='us-east-1', - endpoint='s3.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-gov-west-1', - endpoint='s3-us-gov-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-west-1', - endpoint='s3-us-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-west-2', - endpoint='s3-us-west-2.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-northeast-1', - endpoint='s3-ap-northeast-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-southeast-1', - endpoint='s3-ap-southeast-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-southeast-2', - endpoint='s3-ap-southeast-2.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='eu-west-1', - endpoint='s3-eu-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='sa-east-1', - endpoint='s3-sa-east-1.amazonaws.com', - connection_cls=S3Connection), - ] + return get_regions( + 's3', + region_cls=S3RegionInfo, + connection_cls=S3Connection + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/s3/key.py python-boto-2.29.1/boto/s3/key.py --- python-boto-2.20.1/boto/s3/key.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/key.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,6 +23,7 @@ from __future__ import with_statement import errno +import hashlib import mimetypes import os import re @@ -40,7 +41,7 @@ from boto.s3.keyfile import KeyFile from boto.s3.user import User from boto import UserAgent -from boto.utils import compute_md5 +from boto.utils import compute_md5, compute_hash from boto.utils import find_matching_headers from boto.utils import 
merge_headers_by_name try: @@ -216,7 +217,8 @@ self.delete_marker = False def handle_restore_headers(self, response): - header = response.getheader('x-amz-restore') + provider = self.bucket.connection.provider + header = response.getheader(provider.restore_header) if header is None: return parts = header.split(',', 1) @@ -257,7 +259,7 @@ with the stored object in the response. See http://goo.gl/EWOPb for details. """ - if self.resp == None: + if self.resp is None: self.mode = 'r' provider = self.bucket.connection.provider @@ -298,6 +300,7 @@ self.content_disposition = value self.handle_version_headers(self.resp) self.handle_encryption_headers(self.resp) + self.handle_restore_headers(self.resp) self.handle_addl_headers(self.resp.getheaders()) def open_write(self, headers=None, override_num_retries=None): @@ -537,19 +540,19 @@ # convenience methods for setting/getting ACL def set_acl(self, acl_str, headers=None): - if self.bucket != None: + if self.bucket is not None: self.bucket.set_acl(acl_str, self.name, headers=headers) def get_acl(self, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_acl(self.name, headers=headers) def get_xml_acl(self, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_xml_acl(self.name, headers=headers) def set_xml_acl(self, acl_str, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) def set_canned_acl(self, acl_str, headers=None): @@ -881,7 +884,7 @@ 'Content-Type', headers) elif self.path: self.content_type = mimetypes.guess_type(self.path)[0] - if self.content_type == None: + if self.content_type is None: self.content_type = self.DefaultContentType headers['Content-Type'] = self.content_type else: @@ -894,6 +897,12 @@ # headers['Trailer'] = "Content-MD5" else: headers['Content-Length'] = str(self.size) + # This is terrible. 
We need a SHA256 of the body for SigV4, but to do + # the chunked ``sender`` behavior above, the ``fp`` isn't available to + # the auth mechanism (because closures). Detect if it's SigV4 & embelish + # while we can before the auth calculations occur. + if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability(): + headers['_sha256'] = compute_hash(fp, hash_algorithm=hashlib.sha256)[0] headers['Expect'] = '100-Continue' headers = boto.utils.merge_meta(headers, self.metadata, provider) resp = self.bucket.connection.make_request( @@ -1053,7 +1062,7 @@ if provider.storage_class_header: headers[provider.storage_class_header] = self.storage_class - if self.bucket != None: + if self.bucket is not None: if not replace: if self.bucket.lookup(self.name): return @@ -1187,7 +1196,7 @@ # What if different providers provide different classes? if hasattr(fp, 'name'): self.path = fp.name - if self.bucket != None: + if self.bucket is not None: if not md5 and provider.supports_chunked_transfer(): # defer md5 calculation to on the fly and # we don't know anything about size yet. @@ -1226,7 +1235,7 @@ self.md5 = md5[0] self.base64md5 = md5[1] - if self.name == None: + if self.name is None: self.name = self.md5 if not replace: if self.bucket.lookup(self.name): @@ -1305,7 +1314,7 @@ reduced_redundancy, encrypt_key=encrypt_key) - def set_contents_from_string(self, s, headers=None, replace=True, + def set_contents_from_string(self, string_data, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, reduced_redundancy=False, encrypt_key=False): @@ -1362,9 +1371,9 @@ be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. 
""" - if isinstance(s, unicode): - s = s.encode("utf-8") - fp = StringIO.StringIO(s) + if isinstance(string_data, unicode): + string_data = string_data.encode("utf-8") + fp = StringIO.StringIO(string_data) r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, reduced_redundancy, encrypt_key=encrypt_key) @@ -1409,6 +1418,14 @@ headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, torrent=torrent, version_id=version_id, @@ -1566,8 +1583,16 @@ headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ - if self.bucket != None: + if self.bucket is not None: if res_download_handler: res_download_handler.get_file(self, fp, headers, cb, num_cb, torrent=torrent, @@ -1622,6 +1647,14 @@ headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. 
+ If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ try: with open(filename, 'wb') as fp: @@ -1634,7 +1667,7 @@ os.remove(filename) raise # if last_modified date was sent from s3, try to set file's timestamp - if self.last_modified != None: + if self.last_modified is not None: try: modified_tuple = rfc822.parsedate_tz(self.last_modified) modified_stamp = int(rfc822.mktime_tz(modified_tuple)) @@ -1680,6 +1713,14 @@ with the stored object in the response. See http://goo.gl/EWOPb for details. + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. + :rtype: string :returns: The contents of the file as a string """ diff -Nru python-boto-2.20.1/boto/s3/lifecycle.py python-boto-2.29.1/boto/s3/lifecycle.py --- python-boto-2.20.1/boto/s3/lifecycle.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/lifecycle.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,16 +23,18 @@ class Rule(object): """ - A Lifcycle rule for an S3 bucket. + A Lifecycle rule for an S3 bucket. :ivar id: Unique identifier for the rule. The value cannot be longer - than 255 characters. + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. :ivar prefix: Prefix identifying one or more objects to which the - rule applies. + rule applies. If prefix is not provided, Boto generates a default + prefix which will match all objects. - :ivar status: If Enabled, the rule is currently being applied. 
- If Disabled, the rule is not currently being applied. + :ivar status: If 'Enabled', the rule is currently being applied. + If 'Disabled', the rule is not currently being applied. :ivar expiration: An instance of `Expiration`. This indicates the lifetime of the objects that are subject to the rule. @@ -44,7 +46,7 @@ def __init__(self, id=None, prefix=None, status=None, expiration=None, transition=None): self.id = id - self.prefix = prefix + self.prefix = '' if prefix is None else prefix self.status = status if isinstance(expiration, (int, long)): # retain backwards compatibility??? @@ -78,7 +80,8 @@ def to_xml(self): s = '' - s += '%s' % self.id + if self.id is not None: + s += '%s' % self.id s += '%s' % self.prefix s += '%s' % self.status if self.expiration is not None: @@ -199,7 +202,8 @@ s += '' return s - def add_rule(self, id, prefix, status, expiration, transition=None): + def add_rule(self, id=None, prefix='', status='Enabled', + expiration=None, transition=None): """ Add a rule to this Lifecycle configuration. This only adds the rule to the local copy. To install the new rule(s) on @@ -208,7 +212,8 @@ :type id: str :param id: Unique identifier for the rule. The value cannot be longer - than 255 characters. + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. :type prefix: str :iparam prefix: Prefix identifying one or more objects to which the diff -Nru python-boto-2.20.1/boto/s3/multipart.py python-boto-2.29.1/boto/s3/multipart.py --- python-boto-2.20.1/boto/s3/multipart.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/multipart.py 2014-05-30 20:49:34.000000000 +0000 @@ -199,7 +199,8 @@ else: setattr(self, name, value) - def get_all_parts(self, max_parts=None, part_number_marker=None): + def get_all_parts(self, max_parts=None, part_number_marker=None, + encoding_type=None): """ Return the uploaded parts of this MultiPart Upload. 
This is a lower-level method that requires you to manually page through @@ -213,6 +214,8 @@ query_args += '&max-parts=%d' % max_parts if part_number_marker: query_args += '&part-number-marker=%s' % part_number_marker + if encoding_type: + query_args += '&encoding-type=%s' % encoding_type response = self.bucket.connection.make_request('GET', self.bucket.name, self.key_name, query_args=query_args) @@ -227,6 +230,14 @@ """ Upload another part of this MultiPart Upload. + .. note:: + + After you initiate multipart upload and upload one or more parts, + you must either complete or abort multipart upload in order to stop + getting charged for storage of the uploaded parts. Only after you + either complete or abort multipart upload, Amazon S3 frees up the + parts storage and stops charging you for the parts storage. + :type fp: file :param fp: The file object you want to upload. diff -Nru python-boto-2.20.1/boto/s3/resumable_download_handler.py python-boto-2.29.1/boto/s3/resumable_download_handler.py --- python-boto-2.20.1/boto/s3/resumable_download_handler.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/resumable_download_handler.py 2014-05-30 20:49:34.000000000 +0000 @@ -140,7 +140,7 @@ # is attempted on an object), but warn user for other errors. if e.errno != errno.ENOENT: # Will restart because - # self.etag_value_for_current_download == None. + # self.etag_value_for_current_download is None. print('Couldn\'t read URI tracker file (%s): %s. Restarting ' 'download from scratch.' 
% (self.tracker_file_name, e.strerror)) diff -Nru python-boto-2.20.1/boto/s3/user.py python-boto-2.29.1/boto/s3/user.py --- python-boto-2.20.1/boto/s3/user.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/s3/user.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,12 +14,12 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -class User: +class User(object): def __init__(self, parent=None, id='', display_name=''): if parent: parent.owner = self @@ -46,4 +46,4 @@ s += '%s' % self.id s += '%s' % self.display_name s += '' % element_name - return s + return s diff -Nru python-boto-2.20.1/boto/sdb/connection.py python-boto-2.29.1/boto/sdb/connection.py --- python-boto-2.20.1/boto/sdb/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -48,7 +48,7 @@ :class:`Domain `. :ivar list items: A list of items retrieved. Starts as empty list. 
""" - threading.Thread.__init__(self, name=name) + super(ItemThread, self).__init__(name=name) #print 'starting %s with %d items' % (name, len(item_names)) self.domain_name = domain_name self.conn = SDBConnection() @@ -86,7 +86,8 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - converter=None, security_token=None, validate_certs=True): + converter=None, security_token=None, validate_certs=True, + profile_name=None): """ For any keywords that aren't documented, refer to the parent class, :py:class:`boto.connection.AWSAuthConnection`. You can avoid having @@ -111,14 +112,15 @@ break self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(SDBConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.box_usage = 0.0 self.converter = converter self.item_cls = Item @@ -493,7 +495,7 @@ response = self.make_request('GetAttributes', params) body = response.read() if response.status == 200: - if item == None: + if item is None: item = self.item_cls(domain, item_name) h = handler.XmlHandler(item, self) xml.sax.parseString(body, h) diff -Nru python-boto-2.20.1/boto/sdb/db/key.py python-boto-2.29.1/boto/sdb/db/key.py --- python-boto-2.20.1/boto/sdb/db/key.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/key.py 2014-05-30 20:49:34.000000000 +0000 @@ -50,7 +50,7 @@ return self.id def has_id_or_name(self): - return self.id != None + return self.id is not None def parent(self): raise NotImplementedError("Key parents are not currently supported") diff -Nru python-boto-2.20.1/boto/sdb/db/manager/sdbmanager.py python-boto-2.29.1/boto/sdb/db/manager/sdbmanager.py --- 
python-boto-2.20.1/boto/sdb/db/manager/sdbmanager.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/manager/sdbmanager.py 2014-05-30 20:49:34.000000000 +0000 @@ -107,7 +107,7 @@ def encode_map(self, prop, value): import urllib - if value == None: + if value is None: return None if not isinstance(value, dict): raise ValueError('Expected a dict value, got %s' % type(value)) @@ -117,7 +117,7 @@ if self.model_class in item_type.mro(): item_type = self.model_class encoded_value = self.encode(item_type, value[key]) - if encoded_value != None: + if encoded_value is not None: new_value.append('%s:%s' % (urllib.quote(key), encoded_value)) return new_value @@ -136,7 +136,7 @@ item_type = getattr(prop, "item_type") dec_val = {} for val in value: - if val != None: + if val is not None: k, v = self.decode_map_element(item_type, val) try: k = int(k) @@ -264,7 +264,7 @@ return float(mantissa + 'e' + exponent) def encode_datetime(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value if isinstance(value, datetime): return value.strftime(ISO8601) @@ -289,7 +289,7 @@ return None def encode_date(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value return value.isoformat() @@ -322,7 +322,7 @@ def encode_reference(self, value): if value in (None, 'None', '', ' '): return None - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value else: return value.id @@ -335,7 +335,7 @@ def encode_blob(self, value): if not value: return None - if isinstance(value, str): + if isinstance(value, basestring): return value if not value.id: @@ -351,7 +351,7 @@ else: raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) - if value.value != None: + if value.value is not None: key.set_contents_from_string(value.value) return value.id @@ -415,7 +415,7 @@ self.converter = SDBConverter(self) 
self._sdb = None self._domain = None - if consistent == None and hasattr(cls, "__consistent__"): + if consistent is None and hasattr(cls, "__consistent__"): consistent = cls.__consistent__ self.consistent = consistent @@ -456,7 +456,7 @@ yield obj def encode_value(self, prop, value): - if value == None: + if value is None: return None if not prop: return str(value) @@ -544,7 +544,7 @@ name = 'itemName()' if name != "itemName()": name = '`%s`' % name - if val == None: + if val is None: if op in ('is', '='): return "%(name)s is null" % {"name": name} elif op in ('is not', '!='): @@ -581,11 +581,11 @@ order_by_filtered = True query_parts.append("(%s)" % select) - if isinstance(filters, str) or isinstance(filters, unicode): + if isinstance(filters, basestring): query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__) if order_by in ["__id__", "itemName()"]: query += " ORDER BY itemName() %s" % order_by_method - elif order_by != None: + elif order_by is not None: query += " ORDER BY `%s` %s" % (order_by, order_by_method) return query @@ -667,7 +667,7 @@ value = self.encode_value(property, value) if value == []: value = None - if value == None: + if value is None: del_attrs.append(property.name) continue attrs[property.name] = value diff -Nru python-boto-2.20.1/boto/sdb/db/manager/xmlmanager.py python-boto-2.29.1/boto/sdb/db/manager/xmlmanager.py --- python-boto-2.20.1/boto/sdb/db/manager/xmlmanager.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/manager/xmlmanager.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -27,7 +27,7 @@ ISO8601 = '%Y-%m-%dT%H:%M:%SZ' -class XMLConverter: +class XMLConverter(object): """ Responsible for converting base Python types to format compatible with underlying database. For SimpleDB, that means everything needs to be converted to a string @@ -145,9 +145,9 @@ return None def encode_reference(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value - if value == None: + if value is None: return '' else: val_node = self.manager.doc.createElement("object") @@ -179,7 +179,7 @@ class XMLManager(object): - + def __init__(self, cls, db_name, db_user, db_passwd, db_host, db_port, db_table, ddl_dir, enable_ssl): self.cls = cls @@ -260,7 +260,7 @@ def get_doc(self): return self.doc - + def encode_value(self, prop, value): return self.converter.encode_prop(prop, value) @@ -296,7 +296,7 @@ prop = obj.find_property(prop_name) value = self.decode_value(prop, prop_node) value = prop.make_value_from_datastore(value) - if value != None: + if value is not None: try: setattr(obj, prop.name, value) except: @@ -321,11 +321,11 @@ prop = cls.find_property(prop_name) value = self.decode_value(prop, prop_node) value = prop.make_value_from_datastore(value) - if value != None: + if value is not None: props[prop.name] = value return (cls, props, id) - - + + def get_object(self, cls, id): if not self.connection: self._connect() @@ -352,7 +352,7 @@ query = str(self._build_query(cls, filters, limit, order_by)) if query: url = "/%s?%s" % (self.db_name, urlencode({"query": query})) - else: + else: url = "/%s" % self.db_name resp = self._make_request('GET', url) if resp.status == 200: @@ -466,18 
+466,18 @@ return doc def unmarshal_object(self, fp, cls=None, id=None): - if isinstance(fp, str) or isinstance(fp, unicode): + if isinstance(fp, basestring): doc = parseString(fp) else: doc = parse(fp) return self.get_object_from_doc(cls, id, doc) - + def unmarshal_props(self, fp, cls=None, id=None): """ Same as unmarshalling an object, except it returns from "get_props_from_doc" """ - if isinstance(fp, str) or isinstance(fp, unicode): + if isinstance(fp, basestring): doc = parseString(fp) else: doc = parse(fp) @@ -499,7 +499,7 @@ return a[name] else: return None - + def get_raw_item(self, obj): return self.domain.get_item(obj.id) diff -Nru python-boto-2.20.1/boto/sdb/db/model.py python-boto-2.29.1/boto/sdb/db/model.py --- python-boto-2.20.1/boto/sdb/db/model.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/model.py 2014-05-30 20:49:34.000000000 +0000 @@ -270,7 +270,7 @@ return cls for sc in cls.__sub_classes__: r = sc.find_subclass(name) - if r != None: + if r is not None: return r class Expando(Model): diff -Nru python-boto-2.20.1/boto/sdb/db/property.py python-boto-2.29.1/boto/sdb/db/property.py --- python-boto-2.20.1/boto/sdb/db/property.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/property.py 2014-05-30 20:49:34.000000000 +0000 @@ -85,7 +85,7 @@ return self.default def validate(self, value): - if self.required and value == None: + if self.required and value is None: raise ValueError('%s is a required property' % self.name) if self.choices and value and not value in self.choices: raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)) @@ -111,9 +111,9 @@ def validate_string(value): - if value == None: + if value is None: return - elif isinstance(value, str) or isinstance(value, unicode): + elif isinstance(value, basestring): if len(value) > 1024: raise ValueError('Length of value greater than maxlength') else: @@ -127,7 +127,7 @@ def __init__(self, 
verbose_name=None, name=None, default='', required=False, validator=validate_string, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, + super(StringProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) @@ -138,13 +138,13 @@ def __init__(self, verbose_name=None, name=None, default='', required=False, validator=None, choices=None, unique=False, max_length=None): - Property.__init__(self, verbose_name, name, default, required, + super(TextProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.max_length = max_length def validate(self, value): value = super(TextProperty, self).validate(value) - if not isinstance(value, str) and not isinstance(value, unicode): + if not isinstance(value, basestring): raise TypeError('Expecting Text, got %s' % type(value)) if self.max_length and len(value) > self.max_length: raise ValueError('Length of value greater than maxlength %s' % self.max_length) @@ -207,7 +207,7 @@ The remaining parameters are passed through to StringProperty.__init__""" - StringProperty.__init__(self, verbose_name, name, default, required, + super(PasswordProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.hashfunc = hashfunc @@ -216,7 +216,7 @@ return p def get_value_for_datastore(self, model_instance): - value = StringProperty.get_value_for_datastore(self, model_instance) + value = super(PasswordProperty, self).get_value_for_datastore(model_instance) if value and len(value): return str(value) else: @@ -227,13 +227,13 @@ p = self.data_type(hashfunc=self.hashfunc) p.set(value) value = p - Property.__set__(self, obj, value) + super(PasswordProperty, self).__set__(obj, value) def __get__(self, obj, objtype): - return self.data_type(StringProperty.__get__(self, obj, objtype), hashfunc=self.hashfunc) + return self.data_type(super(PasswordProperty, self).__get__(obj, objtype), hashfunc=self.hashfunc) 
def validate(self, value): - value = Property.validate(self, value) + value = super(PasswordProperty, self).validate(value) if isinstance(value, self.data_type): if len(value) > 1024: raise ValueError('Length of value greater than maxlength') @@ -254,7 +254,7 @@ id = oldb.id b = Blob(value=value, id=id) value = b - Property.__set__(self, obj, value) + super(BlobProperty, self).__set__(obj, value) class S3KeyProperty(Property): @@ -265,7 +265,7 @@ def __init__(self, verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, + super(S3KeyProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) def validate(self, value): @@ -280,7 +280,7 @@ raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) def __get__(self, obj, objtype): - value = Property.__get__(self, obj, objtype) + value = super(S3KeyProperty, self).__get__(obj, objtype) if value: if isinstance(value, self.data_type): return value @@ -297,7 +297,7 @@ return value def get_value_for_datastore(self, model_instance): - value = Property.get_value_for_datastore(self, model_instance) + value = super(S3KeyProperty, self).get_value_for_datastore(model_instance) if value: return "s3://%s/%s" % (value.bucket.name, value.name) else: @@ -311,13 +311,13 @@ def __init__(self, verbose_name=None, name=None, default=0, required=False, validator=None, choices=None, unique=False, max=2147483647, min=-2147483648): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(IntegerProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.max = max self.min = min def validate(self, value): value = int(value) - value = Property.validate(self, value) + value = super(IntegerProperty, self).validate(value) if value > self.max: raise ValueError('Maximum value is %d' % self.max) 
if value < self.min: @@ -328,9 +328,9 @@ return value is None def __set__(self, obj, value): - if value == "" or value == None: + if value == "" or value is None: value = 0 - return Property.__set__(self, obj, value) + return super(IntegerProperty, self).__set__(obj, value) class LongProperty(Property): @@ -340,11 +340,11 @@ def __init__(self, verbose_name=None, name=None, default=0, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(LongProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) def validate(self, value): value = long(value) - value = Property.validate(self, value) + value = super(LongProperty, self).validate(value) min = -9223372036854775808 max = 9223372036854775807 if value > max: @@ -364,7 +364,7 @@ def __init__(self, verbose_name=None, name=None, default=False, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(BooleanProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) def empty(self, value): return value is None @@ -377,11 +377,11 @@ def __init__(self, verbose_name=None, name=None, default=0.0, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(FloatProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) def validate(self, value): value = float(value) - value = Property.validate(self, value) + value = super(FloatProperty, self).validate(value) return value def empty(self, value): @@ -398,17 +398,17 @@ def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, default=None, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, 
default, required, validator, choices, unique) + super(DateTimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.auto_now = auto_now self.auto_now_add = auto_now_add def default_value(self): if self.auto_now or self.auto_now_add: return self.now() - return Property.default_value(self) + return super(DateTimeProperty, self).default_value() def validate(self, value): - if value == None: + if value is None: return if isinstance(value, datetime.date): return value @@ -417,7 +417,7 @@ def get_value_for_datastore(self, model_instance): if self.auto_now: setattr(model_instance, self.name, self.now()) - return Property.get_value_for_datastore(self, model_instance) + return super(DateTimeProperty, self).get_value_for_datastore(model_instance) def now(self): return datetime.datetime.utcnow() @@ -430,18 +430,18 @@ def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, default=None, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(DateProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.auto_now = auto_now self.auto_now_add = auto_now_add def default_value(self): if self.auto_now or self.auto_now_add: return self.now() - return Property.default_value(self) + return super(DateProperty, self).default_value() def validate(self, value): value = super(DateProperty, self).validate(value) - if value == None: + if value is None: return if not isinstance(value, self.data_type): raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) @@ -449,7 +449,7 @@ def get_value_for_datastore(self, model_instance): if self.auto_now: setattr(model_instance, self.name, self.now()) - val = Property.get_value_for_datastore(self, model_instance) + val = super(DateProperty, self).get_value_for_datastore(model_instance) if isinstance(val, 
datetime.datetime): val = val.date() return val @@ -464,7 +464,7 @@ def __init__(self, verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(TimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) def validate(self, value): value = super(TimeProperty, self).validate(value) @@ -481,7 +481,7 @@ def __init__(self, reference_class=None, collection_name=None, verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): - Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + super(ReferenceProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.reference_class = reference_class self.collection_name = collection_name @@ -493,7 +493,7 @@ # If the value is still the UUID for the referenced object, we need to create # the object now that is the attribute has actually been accessed. 
This lazy # instantiation saves unnecessary roundtrips to SimpleDB - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): value = self.reference_class(value) setattr(obj, self.name, value) return value @@ -501,12 +501,12 @@ def __set__(self, obj, value): """Don't allow this object to be associated to itself This causes bad things to happen""" - if value != None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): + if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): raise ValueError("Can not associate an object with itself!") return super(ReferenceProperty, self).__set__(obj, value) def __property_config__(self, model_class, property_name): - Property.__property_config__(self, model_class, property_name) + super(ReferenceProperty, self).__property_config__(model_class, property_name) if self.collection_name is None: self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name) if hasattr(self.reference_class, self.collection_name): @@ -533,11 +533,11 @@ def validate(self, value): if self.validator: self.validator(value) - if self.required and value == None: + if self.required and value is None: raise ValueError('%s is a required property' % self.name) if value == self.default_value(): return - if not isinstance(value, str) and not isinstance(value, unicode): + if not isinstance(value, basestring): self.check_instance(value) @@ -576,7 +576,7 @@ def __init__(self, verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, calculated_type=int, unique=False, use_method=False): - Property.__init__(self, verbose_name, name, default, required, + super(CalculatedProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) self.calculated_type = calculated_type self.use_method = use_method @@ -617,7 +617,7 @@ if default is None: default = [] self.item_type = item_type - Property.__init__(self, 
verbose_name, name, default=default, required=True, **kwds) + super(ListProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) def validate(self, value): if self.validator: @@ -658,7 +658,7 @@ item_type = self.item_type if isinstance(value, item_type): value = [value] - elif value == None: # Override to allow them to set this to "None" to remove everything + elif value is None: # Override to allow them to set this to "None" to remove everything value = [] return super(ListProperty, self).__set__(obj, value) @@ -672,7 +672,7 @@ if default is None: default = {} self.item_type = item_type - Property.__init__(self, verbose_name, name, default=default, required=True, **kwds) + super(MapProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) def validate(self, value): value = super(MapProperty, self).validate(value) diff -Nru python-boto-2.20.1/boto/sdb/db/query.py python-boto-2.29.1/boto/sdb/db/query.py --- python-boto-2.20.1/boto/sdb/db/query.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/query.py 2014-05-30 20:49:34.000000000 +0000 @@ -39,7 +39,7 @@ return iter(self.manager.query(self)) def next(self): - if self.__local_iter__ == None: + if self.__local_iter__ is None: self.__local_iter__ = self.__iter__() return self.__local_iter__.next() diff -Nru python-boto-2.20.1/boto/sdb/db/sequence.py python-boto-2.29.1/boto/sdb/db/sequence.py --- python-boto-2.20.1/boto/sdb/db/sequence.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/db/sequence.py 2014-05-30 20:49:34.000000000 +0000 @@ -59,7 +59,7 @@ # If they pass us in a string that's not at least # the lenght of our sequence, then return the # first element in our sequence - if val == None or len(val) < self.sequence_length: + if val is None or len(val) < self.sequence_length: return self.sequence_string[0] last_value = val[-self.sequence_length:] if (not self.rollover) and (last_value == self.last_item): @@ -79,21 
+79,21 @@ # Simple Sequence Functions # def increment_by_one(cv=None, lv=None): - if cv == None: + if cv is None: return 0 return cv + 1 def double(cv=None, lv=None): - if cv == None: + if cv is None: return 1 return cv * 2 def fib(cv=1, lv=0): """The fibonacci sequence, this incrementer uses the last value""" - if cv == None: + if cv is None: cv = 1 - if lv == None: + if lv is None: lv = 0 return cv + lv @@ -136,17 +136,17 @@ self.last_value = None self.domain_name = domain_name self.id = id - if init_val == None: + if init_val is None: init_val = fnc(init_val) - if self.id == None: + if self.id is None: import uuid self.id = str(uuid.uuid4()) self.item_type = type(fnc(None)) self.timestamp = None # Allow us to pass in a full name to a function - if isinstance(fnc, str): + if isinstance(fnc, basestring): from boto.utils import find_class fnc = find_class(fnc) self.fnc = fnc @@ -162,7 +162,7 @@ expected_value = [] new_val = {} new_val['timestamp'] = now - if self._value != None: + if self._value is not None: new_val['last_value'] = self._value expected_value = ['current_value', str(self._value)] new_val['current_value'] = val @@ -184,7 +184,7 @@ self.timestamp = val['timestamp'] if 'current_value' in val: self._value = self.item_type(val['current_value']) - if "last_value" in val and val['last_value'] != None: + if "last_value" in val and val['last_value'] is not None: self.last_value = self.item_type(val['last_value']) return self._value diff -Nru python-boto-2.20.1/boto/sdb/domain.py python-boto-2.29.1/boto/sdb/domain.py --- python-boto-2.20.1/boto/sdb/domain.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/domain.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -24,7 +24,7 @@ """ from boto.sdb.queryresultset import SelectResultSet -class Domain: +class Domain(object): def __init__(self, connection=None, name=None): self.connection = connection @@ -64,19 +64,19 @@ :type expected_value: list :param expected_value: If supplied, this is a list or tuple consisting - of a single attribute name and expected value. The list can be + of a single attribute name and expected value. The list can be of the form: * ['name', 'value'] - In which case the call will first verify that the attribute + In which case the call will first verify that the attribute "name" of this item has a value of "value". If it does, the delete - will proceed, otherwise a ConditionalCheckFailed error will be + will proceed, otherwise a ConditionalCheckFailed error will be returned. The list can also be of the form: - + * ['name', True|False] - - which will simply check for the existence (True) or non-existence + + which will simply check for the existence (True) or non-existence (False) of the attribute. :type replace: bool @@ -144,22 +144,22 @@ a dict or Item containing the attribute names and keys and list of values to delete as the value. If no value is supplied, all attribute name/values for the item will be deleted. - + :type expected_value: list :param expected_value: If supplied, this is a list or tuple consisting - of a single attribute name and expected value. The list can be of + of a single attribute name and expected value. The list can be of the form: * ['name', 'value'] In which case the call will first verify that the attribute "name" of this item has a value of "value". 
If it does, the delete - will proceed, otherwise a ConditionalCheckFailed error will be + will proceed, otherwise a ConditionalCheckFailed error will be returned. The list can also be of the form: * ['name', True|False] - which will simply check for the existence (True) or + which will simply check for the existence (True) or non-existence (False) of the attribute. :rtype: bool @@ -171,7 +171,7 @@ def batch_delete_attributes(self, items): """ Delete multiple items in this domain. - + :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are either: @@ -182,7 +182,7 @@ will only be deleted if they match the name/value pairs passed in. * None which means that all attributes associated - with the item should be deleted. + with the item should be deleted. :rtype: bool :return: True if successful @@ -209,12 +209,12 @@ def get_item(self, item_name, consistent_read=False): """ Retrieves an item from the domain, along with all of its attributes. - + :param string item_name: The name of the item to retrieve. :rtype: :class:`boto.sdb.item.Item` or ``None`` - :keyword bool consistent_read: When set to true, ensures that the most + :keyword bool consistent_read: When set to true, ensures that the most recent data is returned. 
- :return: The requested item, or ``None`` if there was no match found + :return: The requested item, or ``None`` if there was no match found """ item = self.get_attributes(item_name, consistent_read=consistent_read) if item: @@ -279,7 +279,7 @@ return self.connection.delete_domain(self) -class DomainMetaData: +class DomainMetaData(object): def __init__(self, domain=None): self.domain = domain @@ -364,7 +364,7 @@ def __init__(self, domain): self.db = domain self.items = {} - Thread.__init__(self) + super(UploaderThread, self).__init__() def run(self): try: diff -Nru python-boto-2.20.1/boto/sdb/__init__.py python-boto-2.29.1/boto/sdb/__init__.py --- python-boto-2.20.1/boto/sdb/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,6 +21,7 @@ # from .regioninfo import SDBRegionInfo +from boto.regioninfo import get_regions def regions(): @@ -30,23 +31,10 @@ :rtype: list :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances """ - return [SDBRegionInfo(name='us-east-1', - endpoint='sdb.amazonaws.com'), - SDBRegionInfo(name='eu-west-1', - endpoint='sdb.eu-west-1.amazonaws.com'), - SDBRegionInfo(name='us-west-1', - endpoint='sdb.us-west-1.amazonaws.com'), - SDBRegionInfo(name='sa-east-1', - endpoint='sdb.sa-east-1.amazonaws.com'), - SDBRegionInfo(name='us-west-2', - endpoint='sdb.us-west-2.amazonaws.com'), - SDBRegionInfo(name='ap-northeast-1', - endpoint='sdb.ap-northeast-1.amazonaws.com'), - SDBRegionInfo(name='ap-southeast-1', - endpoint='sdb.ap-southeast-1.amazonaws.com'), - SDBRegionInfo(name='ap-southeast-2', - endpoint='sdb.ap-southeast-2.amazonaws.com') - ] + return get_regions( + 'sdb', + region_cls=SDBRegionInfo + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/sdb/item.py python-boto-2.29.1/boto/sdb/item.py --- python-boto-2.20.1/boto/sdb/item.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/item.py 2014-05-30 
20:49:34.000000000 +0000 @@ -123,7 +123,7 @@ if replace: del_attrs = [] for name in self: - if self[name] == None: + if self[name] is None: del_attrs.append(name) if len(del_attrs) > 0: self.domain.delete_attributes(self.name, del_attrs) diff -Nru python-boto-2.20.1/boto/sdb/queryresultset.py python-boto-2.29.1/boto/sdb/queryresultset.py --- python-boto-2.20.1/boto/sdb/queryresultset.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/queryresultset.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -33,9 +33,9 @@ yield item num_results += 1 next_token = rs.next_token - more_results = next_token != None - -class QueryResultSet: + more_results = next_token is not None + +class QueryResultSet(object): def __init__(self, domain=None, query='', max_items=None, attr_names=None): self.max_items = max_items @@ -59,8 +59,8 @@ yield item num_results += 1 next_token = rs.next_token - more_results = next_token != None - + more_results = next_token is not None + class SelectResultSet(object): def __init__(self, domain=None, query='', max_items=None, @@ -86,7 +86,7 @@ self.next_token = rs.next_token if self.max_items and num_results >= self.max_items: raise StopIteration - more_results = self.next_token != None + more_results = self.next_token is not None def next(self): return self.__iter__().next() diff -Nru python-boto-2.20.1/boto/sdb/regioninfo.py python-boto-2.29.1/boto/sdb/regioninfo.py --- python-boto-2.20.1/boto/sdb/regioninfo.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sdb/regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -16,7 +16,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -26,7 +26,8 @@ class SDBRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.sdb.connection import SDBConnection - RegionInfo.__init__(self, connection, name, endpoint, + super(SDBRegionInfo, self).__init__(connection, name, endpoint, SDBConnection) diff -Nru python-boto-2.20.1/boto/services/message.py python-boto-2.29.1/boto/services/message.py --- python-boto-2.20.1/boto/services/message.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/message.py 2014-05-30 20:49:34.000000000 +0000 @@ -34,7 +34,7 @@ self['OriginalLocation'] = t[0] self['OriginalFileName'] = t[1] mime_type = mimetypes.guess_type(t[1])[0] - if mime_type == None: + if mime_type is None: mime_type = 'application/octet-stream' self['Content-Type'] = mime_type s = os.stat(key.path) diff -Nru python-boto-2.20.1/boto/services/result.py python-boto-2.29.1/boto/services/result.py --- python-boto-2.20.1/boto/services/result.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/result.py 2014-05-30 20:49:34.000000000 +0000 @@ -15,7 +15,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -25,8 +25,8 @@ from boto.utils import parse_ts import boto -class ResultProcessor: - +class ResultProcessor(object): + LogFileName = 'log.csv' def __init__(self, batch_name, sd, mimetype_files=None): @@ -133,4 +133,4 @@ print 'Elapsed Time: %d' % self.elapsed_time.seconds tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files) print 'Throughput: %f transactions / minute' % tput - + diff -Nru python-boto-2.20.1/boto/services/servicedef.py python-boto-2.29.1/boto/services/servicedef.py --- python-boto-2.20.1/boto/services/servicedef.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/servicedef.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -26,7 +26,7 @@ class ServiceDef(Config): def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None): - Config.__init__(self, config_file) + super(ServiceDef, self).__init__(config_file) self.aws_access_key_id = aws_access_key_id self.aws_secret_access_key = aws_secret_access_key script = Config.get(self, 'Pyami', 'scripts') @@ -37,22 +37,22 @@ def get(self, name, default=None): - return Config.get(self, self.name, name, default) + return super(ServiceDef, self).get(self.name, name, default) def has_option(self, option): - return Config.has_option(self, self.name, option) + return super(ServiceDef, self).has_option(self.name, option) def getint(self, option, default=0): try: - val = Config.get(self, self.name, option) + val = super(ServiceDef, self).get(self.name, option) val = int(val) except: val = int(default) return val - + def getbool(self, option, default=False): try: - val = Config.get(self, self.name, option) + val = super(ServiceDef, self).get(self.name, option) if val.lower() == 'true': val = True else: @@ -60,7 +60,7 @@ except: val = default return val - + def get_obj(self, name): """ Returns the AWS object associated with a given option. @@ -88,4 +88,4 @@ obj = None return obj - + diff -Nru python-boto-2.20.1/boto/services/service.py python-boto-2.29.1/boto/services/service.py --- python-boto-2.20.1/boto/services/service.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/service.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -35,7 +35,7 @@ ProcessingTime = 60 def __init__(self, config_file=None, mimetype_files=None): - ScriptBase.__init__(self, config_file) + super(Service, self).__init__(config_file) self.name = self.__class__.__name__ self.working_dir = boto.config.get('Pyami', 'working_dir') self.sd = ServiceDef(config_file) @@ -100,7 +100,7 @@ key = self.put_file(output_bucket, file, key_name) output_keys.append('%s;type=%s' % (key.name, type)) output_message['OutputKey'] = ','.join(output_keys) - + # write message to each output queue def write_message(self, message): message['Service-Write'] = get_ts() diff -Nru python-boto-2.20.1/boto/services/sonofmmm.py python-boto-2.29.1/boto/services/sonofmmm.py --- python-boto-2.20.1/boto/services/sonofmmm.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/sonofmmm.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -28,7 +28,7 @@ class SonOfMMM(Service): def __init__(self, config_file=None): - Service.__init__(self, config_file) + super(SonOfMMM, self).__init__(config_file) self.log_file = '%s.log' % self.instance_id self.log_path = os.path.join(self.working_dir, self.log_file) boto.set_file_logger(self.name, self.log_path) @@ -78,4 +78,4 @@ if self.output_bucket: key = self.output_bucket.new_key(self.log_file) key.set_contents_from_filename(self.log_path) - Service.shutdown(self) + super(SonOfMMM, self).shutdown() diff -Nru python-boto-2.20.1/boto/services/submit.py python-boto-2.29.1/boto/services/submit.py --- python-boto-2.20.1/boto/services/submit.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/services/submit.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -23,7 +23,7 @@ import os -class Submitter: +class Submitter(object): def __init__(self, sd): self.sd = sd diff -Nru python-boto-2.20.1/boto/ses/connection.py python-boto-2.29.1/boto/ses/connection.py --- python-boto-2.20.1/boto/ses/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ses/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -42,18 +42,19 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSAuthConnection.__init__(self, self.region.endpoint, + super(SESConnection, self).__init__(self.region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['ses'] @@ -520,3 +521,46 @@ return self._make_request('DeleteIdentity', { 'Identity': identity, }) + + def set_identity_notification_topic(self, identity, notification_type, sns_topic=None): + """Sets an SNS topic to publish bounce or complaint notifications for + emails sent with the given identity as the Source. Publishing to topics + may only be disabled when feedback forwarding is enabled. + + :type identity: string + :param identity: An email address or domain name. + + :type notification_type: string + :param notification_type: The type of feedback notifications that will + be published to the specified topic. 
+ Valid Values: Bounce | Complaint + + :type sns_topic: string or None + :param sns_topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (Amazon SNS) topic. + """ + params = { + 'Identity': identity, + 'NotificationType': notification_type + } + if sns_topic: + params['SnsTopic'] = sns_topic + return self._make_request('SetIdentityNotificationTopic', params) + + def set_identity_feedback_forwarding_enabled(self, identity, forwarding_enabled=True): + """ + Enables or disables SES feedback notification via email. + Feedback forwarding may only be disabled when both complaint and + bounce topics are set. + + :type identity: string + :param identity: An email address or domain name. + + :type forwarding_enabled: bool + :param forwarding_enabled: Specifies whether or not to enable feedback forwarding. + """ + return self._make_request('SetIdentityFeedbackForwardingEnabled', { + 'Identity': identity, + 'ForwardingEnabled': 'true' if forwarding_enabled else 'false' + }) + diff -Nru python-boto-2.20.1/boto/ses/__init__.py python-boto-2.29.1/boto/ses/__init__.py --- python-boto-2.20.1/boto/ses/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/ses/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
from connection import SESConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,9 +31,7 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='email.us-east-1.amazonaws.com', - connection_cls=SESConnection)] + return get_regions('ses', connection_cls=SESConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/sns/connection.py python-boto-2.29.1/boto/sns/connection.py --- python-boto-2.20.1/boto/sns/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sns/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -48,28 +48,32 @@ requests, and handling error responses. For a list of available SDKs, go to `Tools for Amazon Web Services`_. """ - DefaultRegionName = 'us-east-1' - DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com' - APIVersion = '2010-03-31' + DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint', + 'sns.us-east-1.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31') + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, + profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, connection_cls=SNSConnection) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(SNSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token=security_token, - 
validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _build_dict_as_list_params(self, params, dictionary, name): """ @@ -264,7 +268,7 @@ :type protocol: string :param protocol: The protocol used to communicate with the subscriber. Current choices are: - email|email-json|http|https|sqs|sms + email|email-json|http|https|sqs|sms|application :type endpoint: string :param endpoint: The location of the endpoint for @@ -274,7 +278,10 @@ * For http, this would be a URL beginning with http * For https, this would be a URL beginning with https * For sqs, this would be the ARN of an SQS Queue - * For sms, this would be a phone number of an SMS-enabled device + * For sms, this would be a phone number of an + SMS-enabled device + * For application, the endpoint is the EndpointArn + of a mobile app and device. """ params = {'TopicArn': topic, 'Protocol': protocol, diff -Nru python-boto-2.20.1/boto/sns/__init__.py python-boto-2.29.1/boto/sns/__init__.py --- python-boto-2.20.1/boto/sns/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sns/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,7 +23,7 @@ # this is here for backward compatibility # originally, the SNSConnection class was defined here from connection import SNSConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -33,34 +33,7 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='sns.us-east-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='eu-west-1', - endpoint='sns.eu-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-gov-west-1', - endpoint='sns.us-gov-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-west-1', - endpoint='sns.us-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='sa-east-1', - 
endpoint='sns.sa-east-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-west-2', - endpoint='sns.us-west-2.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-northeast-1', - endpoint='sns.ap-northeast-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-southeast-1', - endpoint='sns.ap-southeast-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-southeast-2', - endpoint='sns.ap-southeast-2.amazonaws.com', - connection_cls=SNSConnection), - ] + return get_regions('sns', connection_cls=SNSConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/sqs/bigmessage.py python-boto-2.29.1/boto/sqs/bigmessage.py --- python-boto-2.20.1/boto/sqs/bigmessage.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/bigmessage.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,119 @@ +# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid + +import boto +from boto.sqs.message import RawMessage +from boto.exception import SQSDecodeError + + +class BigMessage(RawMessage): + """ + The BigMessage class provides large payloads (up to 5GB) + by storing the payload itself in S3 and then placing a reference + to the S3 object in the actual SQS message payload. + + To create a BigMessage, you should create a BigMessage object + and pass in a file-like object as the ``body`` param and also + pass in the an S3 URL specifying the bucket in which to store + the message body:: + + import boto.sqs + from boto.sqs.bigmessage import BigMessage + + sqs = boto.sqs.connect_to_region('us-west-2') + queue = sqs.get_queue('myqueue') + fp = open('/path/to/bigmessage/data') + msg = BigMessage(queue, fp, 's3://mybucket') + queue.write(msg) + + Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo) + is interpreted to mean that the body of the message is already + stored in S3 and the that S3 URL is then used directly with no + content uploaded by BigMessage. + """ + + def __init__(self, queue=None, body=None, s3_url=None): + self.s3_url = s3_url + super(BigMessage, self).__init__(queue, body) + + def _get_bucket_key(self, s3_url): + bucket_name = key_name = None + if s3_url: + if s3_url.startswith('s3://'): + # We need to split out the bucket from the key (if + # supplied). We also have to be aware that someone + # may provide a trailing '/' character as in: + # s3://foo/ and we want to handle that. 
+ s3_components = s3_url[5:].split('/', 1) + bucket_name = s3_components[0] + if len(s3_components) > 1: + if s3_components[1]: + key_name = s3_components[1] + else: + msg = 's3_url parameter should start with s3://' + raise SQSDecodeError(msg, self) + return bucket_name, key_name + + def encode(self, value): + """ + :type value: file-like object + :param value: A file-like object containing the content + of the message. The actual content will be stored + in S3 and a link to the S3 object will be stored in + the message body. + """ + bucket_name, key_name = self._get_bucket_key(self.s3_url) + if bucket_name and key_name: + return self.s3_url + key_name = uuid.uuid4() + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.new_key(key_name) + key.set_contents_from_file(value) + self.s3_url = 's3://%s/%s' % (bucket_name, key_name) + return self.s3_url + + def _get_s3_object(self, s3_url): + bucket_name, key_name = self._get_bucket_key(s3_url) + if bucket_name and key_name: + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.get_key(key_name) + return key + else: + msg = 'Unable to decode S3 URL: %s' % s3_url + raise SQSDecodeError(msg, self) + + def decode(self, value): + self.s3_url = value + key = self._get_s3_object(value) + return key.get_contents_as_string() + + def delete(self): + # Delete the object in S3 first, then delete the SQS message + if self.s3_url: + key = self._get_s3_object(self.s3_url) + key.delete() + super(BigMessage, self).delete() + diff -Nru python-boto-2.20.1/boto/sqs/connection.py python-boto-2.29.1/boto/sqs/connection.py --- python-boto-2.20.1/boto/sqs/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -19,6 +19,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
+import boto from boto.connection import AWSQueryConnection from boto.sqs.regioninfo import SQSRegionInfo from boto.sqs.queue import Queue @@ -32,9 +33,10 @@ """ A Connection to the SQS Service. """ - DefaultRegionName = 'us-east-1' - DefaultRegionEndpoint = 'queue.amazonaws.com' - APIVersion = '2012-11-05' + DefaultRegionName = boto.config.get('Boto', 'sqs_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sqs_region_endpoint', + 'queue.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sqs_version', '2012-11-05') DefaultContentType = 'text/plain' ResponseError = SQSError AuthServiceName = 'sqs' @@ -48,7 +50,7 @@ region = SQSRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region - AWSQueryConnection.__init__(self, aws_access_key_id, + super(SQSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, @@ -113,7 +115,7 @@ Gets one or all attributes of a Queue :type queue: A Queue object - :param queue: The SQS queue to be deleted + :param queue: The SQS queue to get attributes for :type attribute: str :type attribute: The specific attribute requested. If not @@ -127,6 +129,7 @@ * LastModifiedTimestamp * Policy * ReceiveMessageWaitTimeSeconds + * RedrivePolicy :rtype: :class:`boto.sqs.attributes.Attributes` :return: An Attributes object containing request value(s). @@ -141,7 +144,7 @@ def receive_message(self, queue, number_messages=1, visibility_timeout=None, attributes=None, - wait_time_seconds=None): + wait_time_seconds=None, message_attributes=None): """ Read messages from an SQS Queue. @@ -174,6 +177,11 @@ If a message is available, the call will return sooner than wait_time_seconds. + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. 
+ :rtype: list :return: A list of :class:`boto.sqs.message.Message` objects. @@ -185,6 +193,9 @@ self.build_list_params(params, attributes, 'AttributeName') if wait_time_seconds is not None: params['WaitTimeSeconds'] = wait_time_seconds + if message_attributes is not None: + self.build_list_params(params, message_attributes, + 'MessageAttributeName') return self.get_list('ReceiveMessage', params, [('Message', queue.message_class)], queue.id, queue) @@ -241,10 +252,61 @@ params = {'ReceiptHandle' : receipt_handle} return self.get_status('DeleteMessage', params, queue.id) - def send_message(self, queue, message_content, delay_seconds=None): + def send_message(self, queue, message_content, delay_seconds=None, + message_attributes=None): + """ + Send a new message to the queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type message_content: string + :param message_content: The body of the message + + :type delay_seconds: int + :param delay_seconds: Number of seconds (0 - 900) to delay this + message from being processed. + + :type message_attributes: dict + :param message_attributes: Message attributes to set. 
Should be + of the form: + + { + "name1": { + "data_type": "Number", + "string_value": "1" + }, + "name2": { + "data_type": "String", + "string_value": "Bob" + } + } + + """ params = {'MessageBody' : message_content} if delay_seconds: params['DelaySeconds'] = int(delay_seconds) + + if message_attributes is not None: + for i, name in enumerate(message_attributes.keys(), start=1): + attribute = message_attributes[name] + params['MessageAttribute.%s.Name' % i] = name + if 'data_type' in attribute: + params['MessageAttribute.%s.Value.DataType' % i] = \ + attribute['data_type'] + if 'string_value' in attribute: + params['MessageAttribute.%s.Value.StringValue' % i] = \ + attribute['string_value'] + if 'binary_value' in attribute: + params['MessageAttribute.%s.Value.BinaryValue' % i] = \ + attribute['binary_value'] + if 'string_list_value' in attribute: + params['MessageAttribute.%s.Value.StringListValue' % i] = \ + attribute['string_list_value'] + if 'binary_list_value' in attribute: + params['MessageAttribute.%s.Value.BinaryListValue' % i] = \ + attribute['binary_list_value'] + return self.get_object('SendMessage', params, Message, queue.id, verb='POST') @@ -260,19 +322,44 @@ tuple represents a single message to be written and consists of and ID (string) that must be unique within the list of messages, the message body itself - which can be a maximum of 64K in length, and an + which can be a maximum of 64K in length, an integer which represents the delay time (in seconds) for the message (0-900) before the message will - be delivered to the queue. + be delivered to the queue, and an optional dict of + message attributes like those passed to ``send_message`` + above. 
+ """ params = {} for i, msg in enumerate(messages): - p_name = 'SendMessageBatchRequestEntry.%i.Id' % (i+1) - params[p_name] = msg[0] - p_name = 'SendMessageBatchRequestEntry.%i.MessageBody' % (i+1) - params[p_name] = msg[1] - p_name = 'SendMessageBatchRequestEntry.%i.DelaySeconds' % (i+1) - params[p_name] = msg[2] + base = 'SendMessageBatchRequestEntry.%i' % (i + 1) + params['%s.Id' % base] = msg[0] + params['%s.MessageBody' % base] = msg[1] + params['%s.DelaySeconds' % base] = msg[2] + if len(msg) > 3: + base += '.MessageAttribute' + for j, name in enumerate(msg[3].keys()): + attribute = msg[3][name] + + p_name = '%s.%i.Name' % (base, j + 1) + params[p_name] = name + + if 'data_type' in attribute: + p_name = '%s.%i.DataType' % (base, j + 1) + params[p_name] = attribute['data_type'] + if 'string_value' in attribute: + p_name = '%s.%i.StringValue' % (base, j + 1) + params[p_name] = attribute['string_value'] + if 'binary_value' in attribute: + p_name = '%s.%i.BinaryValue' % (base, j + 1) + params[p_name] = attribute['binary_value'] + if 'string_list_value' in attribute: + p_name = '%s.%i.StringListValue' % (base, j + 1) + params[p_name] = attribute['string_list_value'] + if 'binary_list_value' in attribute: + p_name = '%s.%i.BinaryListValue' % (base, j + 1) + params[p_name] = attribute['binary_list_value'] + return self.get_object('SendMessageBatch', params, BatchResults, queue.id, verb='POST') @@ -357,6 +444,19 @@ lookup = get_queue + def get_dead_letter_source_queues(self, queue): + """ + Retrieves the dead letter source queues for a given queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The queue for which to get DL source queues + :rtype: list + :returns: A list of :py:class:`boto.sqs.queue.Queue` instances. 
+ """ + params = {'QueueUrl': queue.url} + return self.get_list('ListDeadLetterSourceQueues', params, + [('QueueUrl', Queue)]) + # # Permissions methods # diff -Nru python-boto-2.20.1/boto/sqs/__init__.py python-boto-2.29.1/boto/sqs/__init__.py --- python-boto-2.20.1/boto/sqs/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,6 +21,7 @@ # from regioninfo import SQSRegionInfo +from boto.regioninfo import get_regions def regions(): @@ -30,25 +31,10 @@ :rtype: list :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` """ - return [SQSRegionInfo(name='us-east-1', - endpoint='queue.amazonaws.com'), - SQSRegionInfo(name='us-gov-west-1', - endpoint='sqs.us-gov-west-1.amazonaws.com'), - SQSRegionInfo(name='eu-west-1', - endpoint='eu-west-1.queue.amazonaws.com'), - SQSRegionInfo(name='us-west-1', - endpoint='us-west-1.queue.amazonaws.com'), - SQSRegionInfo(name='us-west-2', - endpoint='us-west-2.queue.amazonaws.com'), - SQSRegionInfo(name='sa-east-1', - endpoint='sa-east-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-northeast-1', - endpoint='ap-northeast-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-southeast-1', - endpoint='ap-southeast-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-southeast-2', - endpoint='ap-southeast-2.queue.amazonaws.com') - ] + return get_regions( + 'sqs', + region_cls=SQSRegionInfo + ) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/sqs/messageattributes.py python-boto-2.29.1/boto/sqs/messageattributes.py --- python-boto-2.20.1/boto/sqs/messageattributes.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/messageattributes.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an SQS MessageAttribute Name/Value set +""" + +class MessageAttributes(dict): + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + if name == 'Value': + self.current_value = MessageAttributeValue(self) + return self.current_value + + def endElement(self, name, value, connection): + if name == 'MessageAttribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + pass + else: + setattr(self, name, value) + + +class MessageAttributeValue(dict): + def __init__(self, parent): + self.parent = parent + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'DataType': + self['data_type'] = value + elif name == 'StringValue': + self['string_value'] = value + elif name == 'BinaryValue': + self['binary_value'] = value + elif name == 'StringListValue': + self['string_list_value'] = value + elif name == 'BinaryListValue': + self['binary_list_value'] = value diff -Nru python-boto-2.20.1/boto/sqs/message.py python-boto-2.29.1/boto/sqs/message.py --- python-boto-2.20.1/boto/sqs/message.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/message.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -28,7 +28,7 @@ http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html So, at it's simplest level a Message just needs to allow a developer to store bytes in it and get the bytes -back out. However, to allow messages to have richer semantics, the Message class must support the +back out. However, to allow messages to have richer semantics, the Message class must support the following interfaces: The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a @@ -66,17 +66,18 @@ import base64 import StringIO from boto.sqs.attributes import Attributes +from boto.sqs.messageattributes import MessageAttributes from boto.exception import SQSDecodeError import boto -class RawMessage: +class RawMessage(object): """ Base class for SQS messages. RawMessage does not encode the message in any way. Whatever you store in the body of the message is what will be written to SQS and whatever is returned from SQS is stored directly into the body of the message. 
""" - + def __init__(self, queue=None, body=''): self.queue = queue self.set_body(body) @@ -84,6 +85,8 @@ self.receipt_handle = None self.md5 = None self.attributes = Attributes(self) + self.message_attributes = MessageAttributes(self) + self.md5_message_attributes = None def __len__(self): return len(self.encode(self._body)) @@ -91,6 +94,8 @@ def startElement(self, name, attrs, connection): if name == 'Attribute': return self.attributes + if name == 'MessageAttribute': + return self.message_attributes return None def endElement(self, name, value, connection): @@ -100,8 +105,10 @@ self.id = value elif name == 'ReceiptHandle': self.receipt_handle = value - elif name == 'MD5OfMessageBody': + elif name == 'MD5OfBody': self.md5 = value + elif name == 'MD5OfMessageAttributes': + self.md5_message_attributes = value else: setattr(self, name, value) @@ -115,14 +122,14 @@ def decode(self, value): """Transform seralized byte array into any object.""" return value - + def set_body(self, body): """Override the current body for this object, using decoded format.""" self._body = body def get_body(self): return self._body - + def get_body_encoded(self): """ This method is really a semi-private method used by the Queue.write @@ -140,7 +147,7 @@ self.queue.connection.change_message_visibility(self.queue, self.receipt_handle, visibility_timeout) - + class Message(RawMessage): """ The default Message class used for SQS queues. This class automatically @@ -152,7 +159,7 @@ for details on why this is a good idea. The encode/decode is meant to be transparent to the end-user. 
""" - + def encode(self, value): return base64.b64encode(value) @@ -177,9 +184,9 @@ """ def __init__(self, queue=None, body=None, xml_attrs=None): - if body == None or body == '': + if body is None or body == '': body = {} - Message.__init__(self, queue, body) + super(MHMessage, self).__init__(queue, body) def decode(self, value): try: @@ -251,9 +258,9 @@ value = base64.b64decode(value) except: raise SQSDecodeError('Unable to decode message', self) - return MHMessage.decode(self, value) + return super(EncodedMHMessage, self).decode(value) def encode(self, value): - value = MHMessage.encode(self, value) + value = super(EncodedMHMessage, self).encode(value) return base64.b64encode(value) - + diff -Nru python-boto-2.20.1/boto/sqs/queue.py python-boto-2.29.1/boto/sqs/queue.py --- python-boto-2.20.1/boto/sqs/queue.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/queue.py 2014-05-30 20:49:34.000000000 +0000 @@ -27,7 +27,7 @@ from boto.sqs.message import Message -class Queue: +class Queue(object): def __init__(self, connection=None, url=None, message_class=Message): self.connection = connection @@ -182,7 +182,8 @@ """ return self.connection.remove_permission(self, label) - def read(self, visibility_timeout=None, wait_time_seconds=None): + def read(self, visibility_timeout=None, wait_time_seconds=None, + message_attributes=None): """ Read a single message from the queue. @@ -195,11 +196,17 @@ If a message is available, the call will return sooner than wait_time_seconds. + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. 
+ :rtype: :class:`boto.sqs.message.Message` :return: A single message or None if queue is empty """ rs = self.get_messages(1, visibility_timeout, - wait_time_seconds=wait_time_seconds) + wait_time_seconds=wait_time_seconds, + message_attributes=message_attributes) if len(rs) == 1: return rs[0] else: @@ -216,8 +223,8 @@ :return: The :class:`boto.sqs.message.Message` object that was written. """ new_msg = self.connection.send_message(self, - message.get_body_encoded(), - delay_seconds) + message.get_body_encoded(), delay_seconds=delay_seconds, + message_attributes=message.message_attributes) message.id = new_msg.id message.md5 = new_msg.md5 return message @@ -231,14 +238,16 @@ tuple represents a single message to be written and consists of and ID (string) that must be unique within the list of messages, the message body itself - which can be a maximum of 64K in length, and an + which can be a maximum of 64K in length, an integer which represents the delay time (in seconds) for the message (0-900) before the message will - be delivered to the queue. + be delivered to the queue, and an optional dict of + message attributes like those passed to ``send_message`` + in the connection class. """ return self.connection.send_message_batch(self, messages) - def new_message(self, body=''): + def new_message(self, body='', **kwargs): """ Create new message of appropriate class. @@ -248,13 +257,14 @@ :rtype: :class:`boto.sqs.message.Message` :return: A new Message object """ - m = self.message_class(self, body) + m = self.message_class(self, body, **kwargs) m.queue = self return m # get a variable number of messages, returns a list of messages def get_messages(self, num_messages=1, visibility_timeout=None, - attributes=None, wait_time_seconds=None): + attributes=None, wait_time_seconds=None, + message_attributes=None): """ Get a variable number of messages. @@ -278,13 +288,19 @@ If a message is available, the call will return sooner than wait_time_seconds. 
+ :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. + :rtype: list :return: A list of :class:`boto.sqs.message.Message` objects. """ return self.connection.receive_message( self, number_messages=num_messages, visibility_timeout=visibility_timeout, attributes=attributes, - wait_time_seconds=wait_time_seconds) + wait_time_seconds=wait_time_seconds, + message_attributes=message_attributes) def delete_message(self, message): """ diff -Nru python-boto-2.20.1/boto/sqs/regioninfo.py python-boto-2.29.1/boto/sqs/regioninfo.py --- python-boto-2.20.1/boto/sqs/regioninfo.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sqs/regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -16,7 +16,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -26,7 +26,8 @@ class SQSRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.sqs.connection import SQSConnection - RegionInfo.__init__(self, connection, name, endpoint, + super(SQSRegionInfo, self).__init__(connection, name, endpoint, SQSConnection) diff -Nru python-boto-2.20.1/boto/sts/connection.py python-boto-2.29.1/boto/sts/connection.py --- python-boto-2.20.1/boto/sts/connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sts/connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -69,7 +69,8 @@ is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - converter=None, validate_certs=True, anon=False): + converter=None, validate_certs=True, anon=False, + security_token=None, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, @@ -77,13 +78,15 @@ self.region = region self.anon = anon self._mutex = threading.Semaphore() - AWSQueryConnection.__init__(self, aws_access_key_id, + super(STSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, - validate_certs=validate_certs) + validate_certs=validate_certs, + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): if self.anon: @@ -235,7 +238,9 @@ FederationToken, verb='POST') def assume_role(self, role_arn, role_session_name, policy=None, - duration_seconds=None, external_id=None): + duration_seconds=None, external_id=None, + mfa_serial_number=None, + mfa_token=None): """ Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) @@ -325,6 +330,24 @@ information about 
the external ID, see `About the External ID`_ in Using Temporary Security Credentials . + :type mfa_serial_number: string + :param mfa_serial_number: The identification number of the MFA device that + is associated with the user who is making the AssumeRole call. + Specify this value if the trust policy of the role being assumed + includes a condition that requires MFA authentication. The value is + either the serial number for a hardware device (such as + GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device + (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9. + Maximum length of 256. + + :type mfa_token: string + :param mfa_token: The value provided by the MFA device, if the trust + policy of the role being assumed requires MFA (that is, if the + policy includes a condition that tests for MFA). If the role being + assumed requires MFA and if the TokenCode value is missing or + expired, the AssumeRole call returns an "access denied" errror. + Minimum length of 6. Maximum length of 6. + """ params = { 'RoleArn': role_arn, @@ -336,6 +359,10 @@ params['DurationSeconds'] = duration_seconds if external_id is not None: params['ExternalId'] = external_id + if mfa_serial_number is not None: + params['SerialNumber'] = mfa_serial_number + if mfa_token is not None: + params['TokenCode'] = mfa_token return self.get_object('AssumeRole', params, AssumedRole, verb='POST') def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion, diff -Nru python-boto-2.20.1/boto/sts/__init__.py python-boto-2.29.1/boto/sts/__init__.py --- python-boto-2.20.1/boto/sts/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/sts/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
from connection import STSConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,14 +31,7 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='sts.amazonaws.com', - connection_cls=STSConnection), - RegionInfo(name='us-gov-west-1', - endpoint='sts.us-gov-west-1.amazonaws.com', - connection_cls=STSConnection) - - ] + return get_regions('sts', connection_cls=STSConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/support/__init__.py python-boto-2.29.1/boto/support/__init__.py --- python-boto-2.20.1/boto/support/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/support/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,7 @@ # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,13 +31,7 @@ :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.support.layer1 import SupportConnection - return [ - RegionInfo( - name='us-east-1', - endpoint='support.us-east-1.amazonaws.com', - connection_cls=SupportConnection - ), - ] + return get_regions('support', connection_cls=SupportConnection) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/support/layer1.py python-boto-2.29.1/boto/support/layer1.py --- python-boto-2.20.1/boto/support/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/support/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,11 @@ # IN THE SOFTWARE. 
# -import json +try: + import json +except ImportError: + import simplejson as json + import boto from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo @@ -33,56 +37,56 @@ AWS Support The AWS Support API reference is intended for programmers who need detailed information about the AWS Support actions and data types. - This service enables you to manage with your AWS Support cases - programmatically. It is built on the AWS Query API programming - model and provides HTTP methods that take parameters and return - results in JSON format. + This service enables you to manage your AWS Support cases + programmatically. It uses HTTP methods that return results in JSON + format. The AWS Support service also exposes a set of `Trusted Advisor`_ - features. You can retrieve a list of checks you can run on your - resources, specify checks to run and refresh, and check the status - of checks you have submitted. + features. You can retrieve a list of checks and their + descriptions, get check results, specify checks to refresh, and + get the refresh status of checks. The following list describes the AWS Support case management actions: + **Service names, issue categories, and available severity - levels. **The actions `DescribeServices`_ and - `DescribeSeverityLevels`_ enable you to obtain AWS service names, - service codes, service categories, and problem severity levels. - You use these values when you call the `CreateCase`_ action. - + **Case Creation, case details, and case resolution**. The - actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable - you to create AWS Support cases, retrieve them, and resolve them. - + **Case communication**. The actions - `DescribeCaseCommunications`_ and `AddCommunicationToCase`_ enable - you to retrieve and add communication to AWS Support cases. + levels. 
**The actions DescribeServices and DescribeSeverityLevels + enable you to obtain AWS service names, service codes, service + categories, and problem severity levels. You use these values when + you call the CreateCase action. + + **Case creation, case details, and case resolution.** The + actions CreateCase, DescribeCases, and ResolveCase enable you to + create AWS Support cases, retrieve them, and resolve them. + + **Case communication.** The actions DescribeCommunications and + AddCommunicationToCase enable you to retrieve and add + communication to AWS Support cases. The following list describes the actions available from the AWS Support service for Trusted Advisor: - + `DescribeTrustedAdviserChecks`_ returns the list of checks that you can run against your AWS - resources. + + DescribeTrustedAdvisorChecks returns the list of checks that run + against your AWS resources. + Using the CheckId for a specific check returned by - DescribeTrustedAdviserChecks, you can call - `DescribeTrustedAdvisorCheckResult`_ and obtain a new result for the check you specified. - + Using `DescribeTrustedAdvisorCheckSummaries`_, you can get - summaries for a set of Trusted Advisor checks. - + `RefreshTrustedAdvisorCheck`_ enables you to request that - Trusted Advisor run the check again. - + ``_ gets statuses on the checks you are running. + DescribeTrustedAdvisorChecks, you can call + DescribeTrustedAdvisorCheckResult to obtain the results for the + check you specified. + + DescribeTrustedAdvisorCheckSummaries returns summarized results + for one or more Trusted Advisor checks. + + RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a + specified check. + + DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh + status of one or more checks. - For authentication of requests, the AWS Support uses `Signature + For authentication of requests, AWS Support uses `Signature Version 4 Signing Process`_. 
- See the AWS Support Developer Guide for information about how to - use this service to manage create and manage your support cases, - and how to call Trusted Advisor for results of checks on your - resources. + See the AWS Support `User Guide`_ for information about how to use + this service to create and manage your support cases, and how to + call Trusted Advisor for results of checks on your resources. """ APIVersion = "2013-04-15" DefaultRegionName = "us-east-1" @@ -104,7 +108,7 @@ region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) kwargs['host'] = region.endpoint - AWSQueryConnection.__init__(self, **kwargs) + super(SupportConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): @@ -113,27 +117,30 @@ def add_communication_to_case(self, communication_body, case_id=None, cc_email_addresses=None): """ - This action adds additional customer communication to an AWS - Support case. You use the CaseId value to identify the case to - which you want to add communication. You can list a set of - email addresses to copy on the communication using the - CcEmailAddresses value. The CommunicationBody value contains - the text of the communication. + Adds additional customer communication to an AWS Support case. + You use the `CaseId` value to identify the case to add + communication to. You can list a set of email addresses to + copy on the communication using the `CcEmailAddresses` value. + The `CommunicationBody` value contains the text of the + communication. - This action's response indicates the success or failure of the - request. + The response indicates the success or failure of the request. - This action implements a subset of the behavior on the AWS + This operation implements a subset of the behavior on the AWS Support `Your Support Cases`_ web form. :type case_id: string - :param case_id: + :param case_id: The AWS Support case ID requested or returned in the + call. 
The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 :type communication_body: string - :param communication_body: + :param communication_body: The body of an email communication to add to + the support case. :type cc_email_addresses: list - :param cc_email_addresses: + :param cc_email_addresses: The email addresses in the CC line of an + email to be added to the support case. """ params = {'communicationBody': communication_body, } @@ -144,84 +151,105 @@ return self.make_request(action='AddCommunicationToCase', body=json.dumps(params)) - def create_case(self, subject, service_code, category_code, - communication_body, severity_code=None, + def create_case(self, subject, communication_body, service_code=None, + severity_code=None, category_code=None, cc_email_addresses=None, language=None, issue_type=None): """ - Creates a new case in the AWS Support Center. This action is - modeled on the behavior of the AWS Support Center `Open a new - case`_ page. Its parameters require you to specify the + Creates a new case in the AWS Support Center. This operation + is modeled on the behavior of the AWS Support Center `Open a + new case`_ page. Its parameters require you to specify the following information: - #. **ServiceCode.** Represents a code for an AWS service. You - obtain the ServiceCode by calling `DescribeServices`_. - #. **CategoryCode**. Represents a category for the service - defined for the ServiceCode value. You also obtain the - cateogory code for a service by calling `DescribeServices`_. - Each AWS service defines its own set of category codes. - #. **SeverityCode**. Represents a value that specifies the - urgency of the case, and the time interval in which your - service level agreement specifies a response from AWS Support. - You obtain the SeverityCode by calling - `DescribeSeverityLevels`_. - #. **Subject**. Represents the **Subject** field on the AWS + #. 
**ServiceCode.** The code for an AWS service. You obtain + the `ServiceCode` by calling DescribeServices. + #. **CategoryCode.** The category for the service defined for + the `ServiceCode` value. You also obtain the category code for + a service by calling DescribeServices. Each AWS service + defines its own set of category codes. + #. **SeverityCode.** A value that indicates the urgency of the + case, which in turn determines the response time according to + your service level agreement with AWS Support. You obtain the + SeverityCode by calling DescribeSeverityLevels. + #. **Subject.** The **Subject** field on the AWS Support + Center `Open a new case`_ page. + #. **CommunicationBody.** The **Description** field on the AWS Support Center `Open a new case`_ page. - #. **CommunicationBody**. Represents the **Description** field - on the AWS Support Center `Open a new case`_ page. - #. **Language**. Specifies the human language in which AWS - Support handles the case. The API currently supports English - and Japanese. - #. **CcEmailAddresses**. Represents the AWS Support Center - **CC** field on the `Open a new case`_ page. You can list - email addresses to be copied on any correspondence about the - case. The account that opens the case is already identified by - passing the AWS Credentials in the HTTP POST method or in a - method or function call from one of the programming languages - supported by an `AWS SDK`_. + #. **Language.** The human language in which AWS Support + handles the case. English and Japanese are currently + supported. + #. **CcEmailAddresses.** The AWS Support Center **CC** field + on the `Open a new case`_ page. You can list email addresses + to be copied on any correspondence about the case. The account + that opens the case is already identified by passing the AWS + Credentials in the HTTP POST method or in a method or function + call from one of the programming languages supported by an + `AWS SDK`_. + #. 
**IssueType.** The type of issue for the case. You can + specify either "customer-service" or "technical." If you do + not indicate a value, the default is "technical." + The AWS Support API does not currently support the ability to add attachments to cases. You can, however, call - `AddCommunicationToCase`_ to add information to an open case. + AddCommunicationToCase to add information to an open case. - A successful `CreateCase`_ request returns an AWS Support case - number. Case numbers are used by `DescribeCases`_ request to - retrieve existing AWS Support support cases. + + A successful CreateCase request returns an AWS Support case + number. Case numbers are used by the DescribeCases action to + retrieve existing AWS Support cases. :type subject: string - :param subject: + :param subject: The title of the AWS Support case. :type service_code: string - :param service_code: + :param service_code: The code for the AWS service returned by the call + to DescribeServices. :type severity_code: string :param severity_code: + The code for the severity level returned by the call to + DescribeSeverityLevels. + + + The availability of severity levels depends on each customer's support + subscription. In other words, your subscription may not necessarily + require the urgent level of response time. :type category_code: string - :param category_code: + :param category_code: The category of problem for the AWS Support case. :type communication_body: string - :param communication_body: + :param communication_body: The communication body text when you create + an AWS Support case by calling CreateCase. :type cc_email_addresses: list - :param cc_email_addresses: + :param cc_email_addresses: A list of email addresses that AWS Support + copies on case correspondence. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). 
Language parameters must be passed explicitly for + operations that take them. :type issue_type: string - :param issue_type: + :param issue_type: The type of issue for the case. You can specify + either "customer-service" or "technical." If you do not indicate a + value, the default is "technical." """ params = { 'subject': subject, - 'serviceCode': service_code, - 'categoryCode': category_code, 'communicationBody': communication_body, } + if service_code is not None: + params['serviceCode'] = service_code if severity_code is not None: params['severityCode'] = severity_code + if category_code is not None: + params['categoryCode'] = category_code if cc_email_addresses is not None: params['ccEmailAddresses'] = cc_email_addresses if language is not None: @@ -236,39 +264,51 @@ include_resolved_cases=None, next_token=None, max_results=None, language=None): """ - This action returns a list of cases that you specify by - passing one or more CaseIds. In addition, you can filter the - cases by date by setting values for the AfterTime and - BeforeTime request parameters. + Returns a list of cases that you specify by passing one or + more case IDs. In addition, you can filter the cases by date + by setting values for the `AfterTime` and `BeforeTime` request + parameters. + The response returns the following in JSON format: - #. One or more `CaseDetails`_ data types. - #. One or more NextToken objects, strings that specifies where - to paginate the returned records represented by CaseDetails . + + #. One or more CaseDetails data types. + #. One or more `NextToken` values, which specify where to + paginate the returned records represented by the `CaseDetails` + objects. :type case_id_list: list - :param case_id_list: + :param case_id_list: A list of ID numbers of the support cases you want + returned. The maximum number of cases is 100. :type display_id: string - :param display_id: + :param display_id: The ID displayed for a case in the AWS Support + Center user interface. 
:type after_time: string - :param after_time: + :param after_time: The start date for a filtered date search on support + case communications. :type before_time: string - :param before_time: + :param before_time: The end date for a filtered date search on support + case communications. :type include_resolved_cases: boolean - :param include_resolved_cases: + :param include_resolved_cases: Specifies whether resolved support cases + should be included in the DescribeCases results. :type next_token: string - :param next_token: + :param next_token: A resumption point for pagination. :type max_results: integer - :param max_results: + :param max_results: The maximum number of results to return before + paginating. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -295,30 +335,35 @@ after_time=None, next_token=None, max_results=None): """ - This action returns communications regarding the support case. - You can use the AfterTime and BeforeTime parameters to filter - by date. The CaseId parameter enables you to identify a - specific case by its CaseId number. - - The MaxResults and NextToken parameters enable you to control - the pagination of the result set. Set MaxResults to the number - of cases you want displayed on each page, and use NextToken to - specify the resumption of pagination. + Returns communications regarding the support case. You can use + the `AfterTime` and `BeforeTime` parameters to filter by date. + The `CaseId` parameter enables you to identify a specific case + by its `CaseId` value. + + The `MaxResults` and `NextToken` parameters enable you to + control the pagination of the result set. 
Set `MaxResults` to + the number of cases you want displayed on each page, and use + `NextToken` to specify the resumption of pagination. :type case_id: string - :param case_id: + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 :type before_time: string - :param before_time: + :param before_time: The end date for a filtered date search on support + case communications. :type after_time: string - :param after_time: + :param after_time: The start date for a filtered date search on support + case communications. :type next_token: string - :param next_token: + :param next_token: A resumption point for pagination. :type max_results: integer - :param max_results: + :param max_results: The maximum number of results to return before + paginating. """ params = {'caseId': case_id, } @@ -337,7 +382,7 @@ """ Returns the current list of AWS services and a list of service categories that applies to each one. You then use service - names and categories in your `CreateCase`_ requests. Each AWS + names and categories in your CreateCase requests. Each AWS service has its own set of categories. The service codes and category codes correspond to the values @@ -351,10 +396,14 @@ category codes. :type service_code_list: list - :param service_code_list: + :param service_code_list: A JSON-formatted list of service codes + available for AWS services. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -367,13 +416,16 @@ def describe_severity_levels(self, language=None): """ - This action returns the list of severity levels that you can - assign to an AWS Support case. 
The severity level for a case - is also a field in the `CaseDetails`_ data type included in - any `CreateCase`_ request. + Returns the list of severity levels that you can assign to an + AWS Support case. The severity level for a case is also a + field in the CaseDetails data type included in any CreateCase + request. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -382,29 +434,14 @@ return self.make_request(action='DescribeSeverityLevels', body=json.dumps(params)) - def resolve_case(self, case_id=None): - """ - Takes a CaseId and returns the initial state of the case along - with the state of the case after the call to `ResolveCase`_ - completed. - - :type case_id: string - :param case_id: - - """ - params = {} - if case_id is not None: - params['caseId'] = case_id - return self.make_request(action='ResolveCase', - body=json.dumps(params)) - def describe_trusted_advisor_check_refresh_statuses(self, check_ids): """ - Returns the status of all refresh requests Trusted Advisor - checks called using `RefreshTrustedAdvisorCheck`_. + Returns the refresh status of the Trusted Advisor checks that + have the specified check IDs. Check IDs can be obtained by + calling DescribeTrustedAdvisorChecks. :type check_ids: list - :param check_ids: + :param check_ids: The IDs of the Trusted Advisor checks. """ params = {'checkIds': check_ids, } @@ -413,37 +450,35 @@ def describe_trusted_advisor_check_result(self, check_id, language=None): """ - This action responds with the results of a Trusted Advisor - check. Once you have obtained the list of available Trusted - Advisor checks by calling `DescribeTrustedAdvisorChecks`_, you - specify the CheckId for the check you want to retrieve from - AWS Support. 
- - The response for this action contains a JSON-formatted - `TrustedAdvisorCheckResult`_ object - , which is a container for the following three objects: + Returns the results of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. + The response contains a TrustedAdvisorCheckResult object, + which contains these three objects: - #. `TrustedAdvisorCategorySpecificSummary`_ - #. `TrustedAdvisorResourceDetail`_ - #. `TrustedAdvisorResourcesSummary`_ + + TrustedAdvisorCategorySpecificSummary + + TrustedAdvisorResourceDetail + + TrustedAdvisorResourcesSummary - In addition, the response contains the following fields: + In addition, the response contains these fields: - #. **Status**. Overall status of the check. - #. **Timestamp**. Time at which Trusted Advisor last ran the - check. - #. **CheckId**. Unique identifier for the specific check - returned by the request. + + **Status.** The alert status of the check: "ok" (green), + "warning" (yellow), "error" (red), or "not_available". + + **Timestamp.** The time of the last refresh of the check. + + **CheckId.** The unique identifier for the check. :type check_id: string - :param check_id: + :param check_id: The unique identifier for the Trusted Advisor check. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {'checkId': check_id, } @@ -454,17 +489,15 @@ def describe_trusted_advisor_check_summaries(self, check_ids): """ - This action enables you to get the latest summaries for - Trusted Advisor checks that you specify in your request. You - submit the list of Trusted Advisor checks for which you want - summaries. You obtain these CheckIds by submitting a - `DescribeTrustedAdvisorChecks`_ request. 
+ Returns the summaries of the results of the Trusted Advisor + checks that have the specified check IDs. Check IDs can be + obtained by calling DescribeTrustedAdvisorChecks. - The response body contains an array of - `TrustedAdvisorCheckSummary`_ objects. + The response contains an array of TrustedAdvisorCheckSummary + objects. :type check_ids: list - :param check_ids: + :param check_ids: The IDs of the Trusted Advisor checks. """ params = {'checkIds': check_ids, } @@ -473,14 +506,17 @@ def describe_trusted_advisor_checks(self, language): """ - This action enables you to get a list of the available Trusted - Advisor checks. You must specify a language code. English - ("en") and Japanese ("jp") are currently supported. The - response contains a list of `TrustedAdvisorCheckDescription`_ - objects. + Returns information about all available Trusted Advisor + checks, including name, ID, category, description, and + metadata. You must specify a language code; English ("en") and + Japanese ("ja") are currently supported. The response contains + a TrustedAdvisorCheckDescription for each check. :type language: string - :param language: + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {'language': language, } @@ -489,20 +525,46 @@ def refresh_trusted_advisor_check(self, check_id): """ - This action enables you to query the service to request a - refresh for a specific Trusted Advisor check. Your request - body contains a CheckId for which you are querying. The - response body contains a `RefreshTrustedAdvisorCheckResult`_ - object containing Status and TimeUntilNextRefresh fields. + Requests a refresh of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. 
+ + The response contains a RefreshTrustedAdvisorCheckResult + object, which contains these fields: + + + + **Status.** The refresh status of the check: "none", + "enqueued", "processing", "success", or "abandoned". + + **MillisUntilNextRefreshable.** The amount of time, in + milliseconds, until the check is eligible for refresh. + + **CheckId.** The unique identifier for the check. :type check_id: string - :param check_id: + :param check_id: The unique identifier for the Trusted Advisor check. """ params = {'checkId': check_id, } return self.make_request(action='RefreshTrustedAdvisorCheck', body=json.dumps(params)) + def resolve_case(self, case_id=None): + """ + Takes a `CaseId` and returns the initial state of the case + along with the state of the case after the call to ResolveCase + completed. + + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + """ + params = {} + if case_id is not None: + params['caseId'] = case_id + return self.make_request(action='ResolveCase', + body=json.dumps(params)) + def make_request(self, action, body): headers = { 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), diff -Nru python-boto-2.20.1/boto/swf/__init__.py python-boto-2.29.1/boto/swf/__init__.py --- python-boto-2.20.1/boto/swf/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/swf/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,19 +23,10 @@ # from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import get_regions, load_regions import boto.swf.layer1 -REGION_ENDPOINTS = { - 'us-east-1': 'swf.us-east-1.amazonaws.com', - 'us-gov-west-1': 'swf.us-gov-west-1.amazonaws.com', - 'us-west-1': 'swf.us-west-1.amazonaws.com', - 'us-west-2': 'swf.us-west-2.amazonaws.com', - 'sa-east-1': 'swf.sa-east-1.amazonaws.com', - 'eu-west-1': 'swf.eu-west-1.amazonaws.com', - 
'ap-northeast-1': 'swf.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'swf.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'swf.ap-southeast-2.amazonaws.com', -} +REGION_ENDPOINTS = load_regions().get('swf', {}) def regions(**kw_params): @@ -45,9 +36,7 @@ :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` """ - return [RegionInfo(name=region_name, endpoint=REGION_ENDPOINTS[region_name], - connection_cls=boto.swf.layer1.Layer1) - for region_name in REGION_ENDPOINTS] + return get_regions('swf', connection_cls=boto.swf.layer1.Layer1) def connect_to_region(region_name, **kw_params): diff -Nru python-boto-2.20.1/boto/swf/layer1_decisions.py python-boto-2.29.1/boto/swf/layer1_decisions.py --- python-boto-2.20.1/boto/swf/layer1_decisions.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/swf/layer1_decisions.py 2014-05-30 20:49:34.000000000 +0000 @@ -3,7 +3,7 @@ """ -class Layer1Decisions: +class Layer1Decisions(object): """ Use this object to build a list of decisions for a decision response. Each method call will add append a new decision. 
Retrieve the list diff -Nru python-boto-2.20.1/boto/swf/layer1.py python-boto-2.29.1/boto/swf/layer1.py --- python-boto-2.20.1/boto/swf/layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/swf/layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -69,7 +69,7 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, - debug=0, session_token=None, region=None): + debug=0, session_token=None, region=None, profile_name=None): if not region: region_name = boto.config.get('SWF', 'region', self.DefaultRegionName) @@ -79,10 +79,10 @@ break self.region = region - AWSAuthConnection.__init__(self, self.region.endpoint, + super(Layer1, self).__init__(self.region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, - debug, session_token) + debug, session_token, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] diff -Nru python-boto-2.20.1/boto/swf/layer2.py python-boto-2.29.1/boto/swf/layer2.py --- python-boto-2.20.1/boto/swf/layer2.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/swf/layer2.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,6 +23,7 @@ domain = None aws_access_key_id = None aws_secret_access_key = None + region = None def __init__(self, **kwargs): # Set default credentials. 
@@ -33,8 +34,9 @@ for kwarg in kwargs: setattr(self, kwarg, kwargs[kwarg]) - self._swf = Layer1(self.aws_access_key_id, - self.aws_secret_access_key) + self._swf = Layer1(self.aws_access_key_id, + self.aws_secret_access_key, + region=self.region) def __repr__(self): rep_str = str(self.name) diff -Nru python-boto-2.20.1/boto/utils.py python-boto-2.29.1/boto/utils.py --- python-boto-2.20.1/boto/utils.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/utils.py 2014-05-30 20:49:34.000000000 +0000 @@ -195,7 +195,7 @@ metadata = {} for hkey in headers.keys(): if hkey.lower().startswith(metadata_prefix): - val = urllib.unquote_plus(headers[hkey]) + val = urllib.unquote(headers[hkey]) try: metadata[hkey[len(metadata_prefix):]] = unicode(val, 'utf-8') except UnicodeDecodeError: @@ -600,7 +600,7 @@ We have extended the constructor to accept a username/password for SMTP authentication. """ - logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, + super(AuthSMTPHandler, self).__init__(mailhost, fromaddr, toaddrs, subject) self.username = username self.password = password @@ -843,7 +843,7 @@ def get_utf8_value(value): - if not isinstance(value, str) and not isinstance(value, unicode): + if not isinstance(value, basestring): value = str(value) if isinstance(value, unicode): return value.encode('utf-8') @@ -1025,3 +1025,12 @@ matching_headers = find_matching_headers(name, headers) return ','.join(str(headers[h]) for h in matching_headers if headers[h] is not None) + +class RequestHook(object): + """ + This can be extended and supplied to the connection object + to gain access to request and response object after the request completes. + One use for this would be to implement some specific request logging. 
+ """ + def handle_request_data(self, request, response, error=False): + pass diff -Nru python-boto-2.20.1/boto/vpc/customergateway.py python-boto-2.29.1/boto/vpc/customergateway.py --- python-boto-2.20.1/boto/vpc/customergateway.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/customergateway.py 2014-05-30 20:49:34.000000000 +0000 @@ -29,7 +29,7 @@ class CustomerGateway(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(CustomerGateway, self).__init__(connection) self.id = None self.type = None self.state = None diff -Nru python-boto-2.20.1/boto/vpc/dhcpoptions.py python-boto-2.29.1/boto/vpc/dhcpoptions.py --- python-boto-2.20.1/boto/vpc/dhcpoptions.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/dhcpoptions.py 2014-05-30 20:49:34.000000000 +0000 @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
@@ -33,7 +33,7 @@ def endElement(self, name, value, connection): if name == 'value': self.append(value) - + class DhcpConfigSet(dict): def startElement(self, name, attrs, connection): @@ -45,19 +45,19 @@ def endElement(self, name, value, connection): if name == 'key': self._name = value - + class DhcpOptions(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(DhcpOptions, self).__init__(connection) self.id = None self.options = None def __repr__(self): return 'DhcpOptions:%s' % self.id - + def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(DhcpOptions, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'dhcpConfigurationSet': diff -Nru python-boto-2.20.1/boto/vpc/__init__.py python-boto-2.29.1/boto/vpc/__init__.py --- python-boto-2.20.1/boto/vpc/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -35,7 +35,7 @@ from boto.vpc.subnet import Subnet from boto.vpc.vpnconnection import VpnConnection from boto.ec2 import RegionData -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(**kw_params): @@ -48,16 +48,7 @@ :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=VPCConnection) - regions.append(region) - regions.append(RegionInfo(name='us-gov-west-1', - endpoint=RegionData[region_name], - connection_cls=VPCConnection)) - return regions + return get_regions('ec2', connection_cls=VPCConnection) def connect_to_region(region_name, **kw_params): @@ -1125,7 +1116,7 @@ - *state*, a list of states of the Subnet (pending,available) - - *vpcId*, a list of IDs of teh VPC the subnet is in. 
+ - *vpcId*, a list of IDs of the VPC that the subnet is in. - *cidrBlock*, a list of CIDR blocks of the subnet - *availabilityZone*, list of the Availability Zones the subnet is in. diff -Nru python-boto-2.20.1/boto/vpc/internetgateway.py python-boto-2.29.1/boto/vpc/internetgateway.py --- python-boto-2.20.1/boto/vpc/internetgateway.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/internetgateway.py 2014-05-30 20:49:34.000000000 +0000 @@ -28,7 +28,7 @@ class InternetGateway(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(InternetGateway, self).__init__(connection) self.id = None self.attachments = [] diff -Nru python-boto-2.20.1/boto/vpc/networkacl.py python-boto-2.29.1/boto/vpc/networkacl.py --- python-boto-2.20.1/boto/vpc/networkacl.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/networkacl.py 2014-05-30 20:49:34.000000000 +0000 @@ -51,7 +51,7 @@ class NetworkAcl(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(NetworkAcl, self).__init__(connection) self.id = None self.vpc_id = None self.network_acl_entries = [] diff -Nru python-boto-2.20.1/boto/vpc/routetable.py python-boto-2.29.1/boto/vpc/routetable.py --- python-boto-2.20.1/boto/vpc/routetable.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/routetable.py 2014-05-30 20:49:34.000000000 +0000 @@ -29,7 +29,7 @@ class RouteTable(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(RouteTable, self).__init__(connection) self.id = None self.vpc_id = None self.routes = [] diff -Nru python-boto-2.20.1/boto/vpc/subnet.py python-boto-2.29.1/boto/vpc/subnet.py --- python-boto-2.20.1/boto/vpc/subnet.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/subnet.py 2014-05-30 20:49:34.000000000 +0000 @@ -28,7 +28,7 @@ class Subnet(TaggedEC2Object): def __init__(self, 
connection=None): - TaggedEC2Object.__init__(self, connection) + super(Subnet, self).__init__(connection) self.id = None self.vpc_id = None self.state = None diff -Nru python-boto-2.20.1/boto/vpc/vpc.py python-boto-2.29.1/boto/vpc/vpc.py --- python-boto-2.20.1/boto/vpc/vpc.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/vpc.py 2014-05-30 20:49:34.000000000 +0000 @@ -39,7 +39,7 @@ :ivar is_default: Indicates whether the VPC is the default VPC. :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC. """ - TaggedEC2Object.__init__(self, connection) + super(VPC, self).__init__(connection) self.id = None self.dhcp_options_id = None self.state = None diff -Nru python-boto-2.20.1/boto/vpc/vpnconnection.py python-boto-2.29.1/boto/vpc/vpnconnection.py --- python-boto-2.20.1/boto/vpc/vpnconnection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/vpnconnection.py 2014-05-30 20:49:34.000000000 +0000 @@ -152,7 +152,7 @@ """ def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(VpnConnection, self).__init__(connection) self.id = None self.state = None self.customer_gateway_configuration = None diff -Nru python-boto-2.20.1/boto/vpc/vpngateway.py python-boto-2.29.1/boto/vpc/vpngateway.py --- python-boto-2.20.1/boto/vpc/vpngateway.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/boto/vpc/vpngateway.py 2014-05-30 20:49:34.000000000 +0000 @@ -45,7 +45,7 @@ class VpnGateway(TaggedEC2Object): def __init__(self, connection=None): - TaggedEC2Object.__init__(self, connection) + super(VpnGateway, self).__init__(connection) self.id = None self.type = None self.state = None @@ -56,7 +56,7 @@ return 'VpnGateway:%s' % self.id def startElement(self, name, attrs, connection): - retval = TaggedEC2Object.startElement(self, name, attrs, connection) + retval = super(VpnGateway, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'item': diff 
-Nru python-boto-2.20.1/boto.egg-info/PKG-INFO python-boto-2.29.1/boto.egg-info/PKG-INFO --- python-boto-2.20.1/boto.egg-info/PKG-INFO 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/boto.egg-info/PKG-INFO 2014-07-25 19:29:13.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: boto -Version: 2.20.1 +Version: 2.27.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat @@ -9,9 +9,9 @@ Description: #### boto #### - boto 2.20.1 + boto 2.27.0 - Released: 13-December-2013 + Released: 6-March-2014 .. image:: https://travis-ci.org/boto/boto.png?branch=develop :target: https://travis-ci.org/boto/boto @@ -116,15 +116,15 @@ :: - $ pip install boto + $ pip install boto Install from source: :: - $ git clone git://github.com/boto/boto.git - $ cd boto - $ python setup.py install + $ git clone git://github.com/boto/boto.git + $ cd boto + $ python setup.py install ********** ChangeLogs diff -Nru python-boto-2.20.1/boto.egg-info/SOURCES.txt python-boto-2.29.1/boto.egg-info/SOURCES.txt --- python-boto-2.20.1/boto.egg-info/SOURCES.txt 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/boto.egg-info/SOURCES.txt 2014-07-25 19:29:13.000000000 +0000 @@ -30,6 +30,7 @@ boto/auth_handler.py boto/compat.py boto/connection.py +boto/endpoints.json boto/exception.py boto/handler.py boto/https_connection.py @@ -37,6 +38,7 @@ boto/plugin.py boto/provider.py boto/regioninfo.py +boto/requestlog.py boto/resultset.py boto/storage_uri.py boto/utils.py @@ -114,6 +116,7 @@ boto/ec2/instance.py boto/ec2/instanceinfo.py boto/ec2/instancestatus.py +boto/ec2/instancetype.py boto/ec2/keypair.py boto/ec2/launchspecification.py boto/ec2/networkinterface.py @@ -126,7 +129,6 @@ boto/ec2/spotinstancerequest.py boto/ec2/spotpricehistory.py boto/ec2/tag.py -boto/ec2/vmtype.py boto/ec2/volume.py boto/ec2/volumestatus.py boto/ec2/zone.py @@ -135,6 +137,7 @@ boto/ec2/autoscale/group.py boto/ec2/autoscale/instance.py 
boto/ec2/autoscale/launchconfig.py +boto/ec2/autoscale/limits.py boto/ec2/autoscale/policy.py boto/ec2/autoscale/request.py boto/ec2/autoscale/scheduled.py @@ -251,11 +254,15 @@ boto/rds/dbsnapshot.py boto/rds/dbsubnetgroup.py boto/rds/event.py +boto/rds/logfile.py boto/rds/optiongroup.py boto/rds/parametergroup.py boto/rds/regioninfo.py boto/rds/statusinfo.py boto/rds/vpcsecuritygroupmembership.py +boto/rds2/__init__.py +boto/rds2/exceptions.py +boto/rds2/layer1.py boto/redshift/__init__.py boto/redshift/exceptions.py boto/redshift/layer1.py @@ -266,6 +273,7 @@ boto/route53/__init__.py boto/route53/connection.py boto/route53/exception.py +boto/route53/healthcheck.py boto/route53/hostedzone.py boto/route53/record.py boto/route53/status.py @@ -322,6 +330,7 @@ boto/sqs/__init__.py boto/sqs/attributes.py boto/sqs/batchresults.py +boto/sqs/bigmessage.py boto/sqs/connection.py boto/sqs/jsonmessage.py boto/sqs/message.py @@ -369,6 +378,8 @@ docs/source/getting_started.rst docs/source/index.rst docs/source/rds_tut.rst +docs/source/request_hook_tut.rst +docs/source/route53_tut.rst docs/source/s3_tut.rst docs/source/security_groups.rst docs/source/ses_tut.rst @@ -383,6 +394,7 @@ docs/source/boto_theme/static/pygments.css docs/source/extensions/githublinks/__init__.py docs/source/migrations/dynamodb_v1_to_v2.rst +docs/source/migrations/rds_v1_to_v2.rst docs/source/ref/autoscale.rst docs/source/ref/beanstalk.rst docs/source/ref/boto.rst @@ -407,12 +419,14 @@ docs/source/ref/gs.rst docs/source/ref/iam.rst docs/source/ref/index.rst +docs/source/ref/kinesis.rst docs/source/ref/manage.rst docs/source/ref/mturk.rst docs/source/ref/mws.rst docs/source/ref/opsworks.rst docs/source/ref/pyami.rst docs/source/ref/rds.rst +docs/source/ref/rds2.rst docs/source/ref/redshift.rst docs/source/ref/route53.rst docs/source/ref/s3.rst @@ -449,6 +463,17 @@ docs/source/releasenotes/v2.2.2.rst docs/source/releasenotes/v2.20.0.rst docs/source/releasenotes/v2.20.1.rst 
+docs/source/releasenotes/v2.21.0.rst +docs/source/releasenotes/v2.21.1.rst +docs/source/releasenotes/v2.21.2.rst +docs/source/releasenotes/v2.22.0.rst +docs/source/releasenotes/v2.22.1.rst +docs/source/releasenotes/v2.23.0.rst +docs/source/releasenotes/v2.24.0.rst +docs/source/releasenotes/v2.25.0.rst +docs/source/releasenotes/v2.26.0.rst +docs/source/releasenotes/v2.26.1.rst +docs/source/releasenotes/v2.27.0.rst docs/source/releasenotes/v2.3.0.rst docs/source/releasenotes/v2.4.0.rst docs/source/releasenotes/v2.5.0.rst @@ -487,7 +512,10 @@ tests/integration/cloudsearch/test_cert_verification.py tests/integration/cloudsearch/test_layers.py tests/integration/cloudtrail/__init__.py +tests/integration/cloudtrail/test_cert_verification.py tests/integration/cloudtrail/test_cloudtrail.py +tests/integration/datapipeline/__init__.py +tests/integration/datapipeline/test_cert_verification.py tests/integration/datapipeline/test_layer1.py tests/integration/directconnect/__init__.py tests/integration/directconnect/test_directconnect.py @@ -538,6 +566,7 @@ tests/integration/iam/__init__.py tests/integration/iam/test_cert_verification.py tests/integration/kinesis/__init__.py +tests/integration/kinesis/test_cert_verification.py tests/integration/kinesis/test_kinesis.py tests/integration/mws/__init__.py tests/integration/mws/test.py @@ -547,11 +576,16 @@ tests/integration/rds/test_cert_verification.py tests/integration/rds/test_db_subnet_group.py tests/integration/rds/test_promote_modify.py +tests/integration/rds2/__init__.py +tests/integration/rds2/test_cert_verification.py +tests/integration/rds2/test_connection.py tests/integration/redshift/__init__.py tests/integration/redshift/test_cert_verification.py tests/integration/redshift/test_layer1.py tests/integration/route53/__init__.py +tests/integration/route53/test_alias_resourcerecordsets.py tests/integration/route53/test_cert_verification.py +tests/integration/route53/test_health_check.py 
tests/integration/route53/test_resourcerecordsets.py tests/integration/route53/test_zone.py tests/integration/s3/__init__.py @@ -580,6 +614,7 @@ tests/integration/sns/test_connection.py tests/integration/sns/test_sns_sqs_subscription.py tests/integration/sqs/__init__.py +tests/integration/sqs/test_bigmessage.py tests/integration/sqs/test_cert_verification.py tests/integration/sqs/test_connection.py tests/integration/storage_uri/__init__.py @@ -611,6 +646,7 @@ tests/unit/__init__.py tests/unit/test_connection.py tests/unit/test_exception.py +tests/unit/test_regioninfo.py tests/unit/auth/__init__.py tests/unit/auth/test_query.py tests/unit/auth/test_sigv4.py @@ -620,6 +656,7 @@ tests/unit/cloudformation/test_connection.py tests/unit/cloudformation/test_stack.py tests/unit/cloudfront/__init__.py +tests/unit/cloudfront/test_connection.py tests/unit/cloudfront/test_distribution.py tests/unit/cloudfront/test_invalidation_list.py tests/unit/cloudfront/test_signed_urls.py @@ -645,6 +682,7 @@ tests/unit/ec2/test_connection.py tests/unit/ec2/test_instance.py tests/unit/ec2/test_instancestatus.py +tests/unit/ec2/test_instancetype.py tests/unit/ec2/test_networkinterface.py tests/unit/ec2/test_securitygroup.py tests/unit/ec2/test_snapshot.py @@ -683,10 +721,13 @@ tests/unit/rds/__init__.py tests/unit/rds/test_connection.py tests/unit/rds/test_snapshot.py +tests/unit/rds2/__init__.py +tests/unit/rds2/test_connection.py tests/unit/route53/__init__.py tests/unit/route53/test_connection.py tests/unit/s3/__init__.py tests/unit/s3/test_bucket.py +tests/unit/s3/test_connection.py tests/unit/s3/test_cors_configuration.py tests/unit/s3/test_key.py tests/unit/s3/test_keyfile.py diff -Nru python-boto-2.20.1/CONTRIBUTING python-boto-2.29.1/CONTRIBUTING --- python-boto-2.20.1/CONTRIBUTING 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/CONTRIBUTING 2014-05-30 20:49:34.000000000 +0000 @@ -40,7 +40,7 @@ * boto * Optionally of the other dependencies involved - * If possile, create a 
pull request with a (failing) test case demonstrating + * If possible, create a pull request with a (failing) test case demonstrating what's wrong. This makes the process for fixing bugs quicker & gets issues resolved sooner. diff -Nru python-boto-2.20.1/debian/changelog python-boto-2.29.1/debian/changelog --- python-boto-2.20.1/debian/changelog 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/debian/changelog 2014-07-25 19:29:13.000000000 +0000 @@ -1,3 +1,30 @@ +python-boto (2.29.1-1ubuntu1) utopic; urgency=medium + + * Merge with debian. Remaining Ubuntu changes: + * d/tests/unit: Fix autopkgtest error in test_timeout. + * d/tests/unit: run tests/test.py with each python in 'pyversions -i' + + -- Scott Moser Fri, 25 Jul 2014 12:59:34 -0400 + +python-boto (2.29.1-1) unstable; urgency=medium + + * New upstream release (Closes: #750571). + + -- Eric Evans Sat, 28 Jun 2014 18:13:59 -0500 + +python-boto (2.27.0-1) unstable; urgency=medium + + * Missing dependency on python-requests (Closes: #740170) + * Run autopkgtests against installed python-boto package. + + -- Eric Evans Sun, 30 Mar 2014 12:36:49 -0500 + +python-boto (2.25.0-1) unstable; urgency=medium + + * New upstream release. + + -- Eric Evans Fri, 07 Feb 2014 20:32:57 -0600 + python-boto (2.20.1-2ubuntu2) trusty; urgency=medium * d/tests/unit: Fix autopkgtest error in test_timeout. 
diff -Nru python-boto-2.20.1/debian/tests/control python-boto-2.29.1/debian/tests/control --- python-boto-2.20.1/debian/tests/control 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/debian/tests/control 2014-07-25 19:29:13.000000000 +0000 @@ -1,3 +1,3 @@ Tests: unit -Depends: python-all (>= 2.6.6-3), python-nose (>= 1.3.0), python-mock (>= 1.0.1), python-httpretty (>= 0.6.3), python-requests +Depends: python-boto, python-nose (>= 1.3.0), python-mock (>= 1.0.1), python-httpretty (>= 0.6.3) Restrictions: allow-stderr diff -Nru python-boto-2.20.1/debian/tests/unit python-boto-2.29.1/debian/tests/unit --- python-boto-2.20.1/debian/tests/unit 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/debian/tests/unit 2014-07-25 19:29:13.000000000 +0000 @@ -10,7 +10,13 @@ touch "$home_temp/.ssh/known_hosts" export HOME=$home_temp -tests/test.py unit > /dev/null +set -eu +cp -r tests "$ADTTMP/" +cd "$ADTTMP" + +for python in $(pyversions -i); do + $python tests/test.py unit > /dev/null +done #Restore original home directory and remove temp home dir export HOME=$home_orig diff -Nru python-boto-2.20.1/docs/source/autoscale_tut.rst python-boto-2.29.1/docs/source/autoscale_tut.rst --- python-boto-2.20.1/docs/source/autoscale_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/autoscale_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -199,7 +199,7 @@ >>> import boto.ec2 >>> ec2 = boto.ec2.connect_to_region('us-west-2) ->>> conn.get_all_groups(names=['my_group'])[0] +>>> group = conn.get_all_groups(names=['my_group'])[0] >>> instance_ids = [i.instance_id for i in group.instances] >>> instances = ec2.get_only_instances(instance_ids) diff -Nru python-boto-2.20.1/docs/source/boto_config_tut.rst python-boto-2.29.1/docs/source/boto_config_tut.rst --- python-boto-2.20.1/docs/source/boto_config_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/boto_config_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -10,30 +10,35 @@ 
There is a growing list of configuration options for the boto library. Many of these options can be passed into the constructors for top-level objects such as connections. Some options, such as credentials, can also be read from -environment variables (e.g. ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY``). -It is also possible to manage these options in a central place through the use -of boto config files. +environment variables (e.g. ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, +``AWS_SECURITY_TOKEN`` and ``AWS_PROFILE``). It is also possible to manage +these options in a central place through the use of boto config files. Details ------- -A boto config file is simply a .ini format configuration file that specifies -values for options that control the behavior of the boto library. Upon startup, -the boto library looks for configuration files in the following locations +A boto config file is a text file formatted like an .ini configuration file that specifies +values for options that control the behavior of the boto library. In Unix/Linux systems, +on startup, the boto library looks for configuration files in the following locations and in the following order: * /etc/boto.cfg - for site-wide settings that all users on this machine will use * ~/.boto - for user-specific settings +* ~/.aws/credentials - for credentials shared between SDKs -The options are merged into a single, in-memory configuration that is -available as :py:mod:`boto.config`. The :py:class:`boto.pyami.config.Config` +In Windows, create a text file that has any name (e.g. boto.config). It's +recommended that you put this file in your user folder. Then set +a user environment variable named BOTO_CONFIG to the full path of that file. + +The options in the config file are merged into a single, in-memory configuration +that is available as :py:mod:`boto.config`. 
The :py:class:`boto.pyami.config.Config` class is a subclass of the standard Python :py:class:`ConfigParser.SafeConfigParser` object and inherits all of the methods of that object. In addition, the boto :py:class:`Config ` class defines additional methods that are described on the PyamiConfigMethods page. -An example ``~/.boto`` file should look like:: +An example boto config file might look like:: [Credentials] aws_access_key_id = @@ -52,9 +57,12 @@ The Credentials section is used to specify the AWS credentials used for all boto requests. The order of precedence for authentication credentials is: -* Credentials passed into Connection class constructor. +* Credentials passed into the Connection class constructor. * Credentials specified by environment variables -* Credentials specified as options in the config file. +* Credentials specified as named profiles in the shared credential file. +* Credentials specified by default in the shared credential file. +* Credentials specified as named profiles in the config file. +* Credentials specified by default in the config file. This section defines the following options: ``aws_access_key_id`` and ``aws_secret_access_key``. The former being your AWS key id and the latter @@ -62,12 +70,39 @@ For example:: + [profile name_goes_here] + aws_access_key_id = + aws_secret_access_key = + [Credentials] - aws_access_key_id = - aws_secret_access_key = + aws_access_key_id = + aws_secret_access_key = Please notice that quote characters are not used to either side of the '=' -operator even when both your AWS access key id and secret key are strings. +operator even when both your AWS access key ID and secret key are strings. + +If you have multiple AWS keypairs that you use for different purposes, +use the ``profile`` style shown above. You can set an arbitrary number +of profiles within your configuration files and then reference them by name +when you instantiate your connection. 
If you specify a profile that does not +exist in the configuration, the keys used under the ``[Credentials]`` heading +will be applied by default. + +The shared credentials file in ``~/.aws/credentials`` uses a slightly +different format. For example:: + + [default] + aws_access_key_id = + aws_secret_access_key = + + [name_goes_here] + aws_access_key_id = + aws_secret_access_key = + + [another_profile] + aws_access_key_id = + aws_secret_access_key = + aws_security_token = For greater security, the secret key can be stored in a keyring and retrieved via the keyring package. To use a keyring, use ``keyring``, @@ -135,11 +170,18 @@ :is_secure: Is the connection over SSL. This setting will overide passed in values. :https_validate_certificates: Validate HTTPS certificates. This is on by default -:ca_certificates_file: Location of CA certificates +:ca_certificates_file: Location of CA certificates or the keyword "system". + Using the system keyword lets boto get out of the way and makes the + SSL certificate validation the responsibility the underlying SSL + implementation provided by the system. :http_socket_timeout: Timeout used to overwrite the system default socket timeout for httplib . :send_crlf_after_proxy_auth_headers: Change line ending behaviour with proxies. For more details see this `discussion `_ +:endpoints_path: Allows customizing the regions/endpoints available in Boto. + Provide an absolute path to a custom JSON file, which gets merged into the + defaults. (This can also be specified with the ``BOTO_ENDPOINTS`` + environment variable instead.) 
These settings will default to:: @@ -150,6 +192,7 @@ ca_certificates_file = cacerts.txt http_socket_timeout = 60 send_crlf_after_proxy_auth_headers = False + endpoints_path = /path/to/my/boto/endpoints.json You can control the timeouts and number of retries used when retrieving information from the Metadata Service (this is used for retrieving credentials diff -Nru python-boto-2.20.1/docs/source/cloudwatch_tut.rst python-boto-2.29.1/docs/source/cloudwatch_tut.rst --- python-boto-2.20.1/docs/source/cloudwatch_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/cloudwatch_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -76,7 +76,7 @@ data for the previous hour:: >>> import datetime - >>> end = datetime.datetime.now() + >>> end = datetime.datetime.utcnow() >>> start = end - datetime.timedelta(hours=1) We also need to supply the Statistic that we want reported and diff -Nru python-boto-2.20.1/docs/source/dynamodb2_tut.rst python-boto-2.29.1/docs/source/dynamodb2_tut.rst --- python-boto-2.20.1/docs/source/dynamodb2_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/dynamodb2_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -310,6 +310,13 @@ Querying -------- +.. warning:: + + The ``Table`` object has both a ``query`` & a ``query_2`` method. If you + are writing new code, **DO NOT** use ``Table.query``. It presents results + in an incorrect order than expected & is strictly present for + backward-compatibility. + Manually fetching out each item by itself isn't tenable for large datasets. To cope with fetching many records, you can either perform a standard query, query via a local secondary index or scan the entire table. @@ -338,7 +345,7 @@ To run a query for last names starting with the letter "D":: - >>> names_with_d = users.query( + >>> names_with_d = users.query_2( ... account_type__eq='standard_user', ... last_name__beginswith='D' ... 
) @@ -352,7 +359,7 @@ You can also reverse results (``reverse=True``) as well as limiting them (``limit=2``):: - >>> rev_with_d = users.query( + >>> rev_with_d = users.query_2( ... account_type__eq='standard_user', ... last_name__beginswith='D', ... reverse=True, @@ -369,7 +376,7 @@ fields:: # Users within the last hour. - >>> recent = users.query( + >>> recent = users.query_2( ... account_type__eq='standard_user', ... date_joined__gte=time.time() - (60 * 60), ... index='DateJoinedIndex' @@ -380,6 +387,25 @@ 'Alice' 'Jane' +By default, DynamoDB can return a large amount of data per-request (up to 1Mb +of data). To prevent these requests from drowning other smaller gets, you can +specify a smaller page size via the ``max_page_size`` argument to +``Table.query_2`` & ``Table.scan``. Doing so looks like:: + + # Small pages yield faster responses & less potential of drowning other + # requests. + >>> all_users = users.query_2( + ... account_type__eq='standard_user', + ... date_joined__gte=0, + ... max_page_size=10 + ... ) + + # Usage is the same, but now many smaller requests are done. + >>> for user in recent: + ... print user['first_name'] + 'Alice' + 'Jane' + Finally, if you need to query on data that's not in either a key or in an index, you can run a ``Table.scan`` across the whole table, which accepts a similar but expanded set of filters. If you're familiar with the Map/Reduce @@ -407,6 +433,49 @@ 'John' +The ``ResultSet`` +~~~~~~~~~~~~~~~~~ + +Both ``Table.query_2`` & ``Table.scan`` return an object called ``ResultSet``. +It's a lazily-evaluated object that uses the `Iterator protocol`_. It delays +your queries until you request the next item in the result set. + +Typical use is simply a standard ``for`` to iterate over the results:: + + >>> result_set = users.scan() + >>> for user in result_set: + ... print user['first_name'] + +However, this throws away results as it fetches more data. As a result, you +can't index it like a ``list``. 
+ + >>> len(result_set) + 0 + +Because it does this, if you need to loop over your results more than once (or +do things like negative indexing, length checks, etc.), you should wrap it in +a call to ``list()``. Ex.:: + + >>> result_set = users.scan() + >>> all_users = list(result_set) + # Slice it for every other user. + >>> for user in all_users[::2]: + ... print user['first_name'] + +.. warning:: + + Wrapping calls like the above in ``list(...)`` **WILL** cause it to evaluate + the **ENTIRE** potentially large data set. + + Appropriate use of the ``limit=...`` kwarg to ``Table.query_2`` & + ``Table.scan`` calls are **VERY** important should you chose to do this. + + Alternatively, you can build your own list, using ``for`` on the + ``ResultSet`` to lazily build the list (& potentially stop early). + +.. _`Iterator protocol`: http://docs.python.org/2/library/stdtypes.html#iterator-types + + Parallel Scan ------------- diff -Nru python-boto-2.20.1/docs/source/ec2_tut.rst python-boto-2.29.1/docs/source/ec2_tut.rst --- python-boto-2.20.1/docs/source/ec2_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ec2_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -176,3 +176,40 @@ True +Working With Launch Configurations +---------------------------------- + +Launch Configurations allow you to create a re-usable set of properties for an +instance. These are used with AutoScaling groups to produce consistent repeatable +instances sets. 
+ +Creating a Launch Configuration is easy: + + >>> conn = boto.connect_autoscale() + >>> config = LaunchConfig(name='foo', image_id='ami-abcd1234', key_name='foo.pem') + >>> conn.create_launch_configuration(config) + +Once you have a launch configuration, you can list you current configurations: + + >>> conn = boto.connect_autoscale() + >>> config = conn.get_all_launch_configurations(names=['foo']) + +If you no longer need a launch configuration, you can delete it: + + >>> conn = boto.connect_autoscale() + >>> conn.delete_launch_configuration('foo') + +.. versionchanged:: 2.27.0 +.. Note:: + + If ``use_block_device_types=True`` is passed to the connection it will deserialize + Launch Configurations with Block Device Mappings into a re-usable format with + BlockDeviceType objects, similar to how AMIs are deserialized currently. Legacy + behavior is to put them into a format that is incompatabile with creating new Launch + Configurations. This switch is in place to preserve backwards compatability, but + its usage is the preferred format going forward. 
+ + If you would like to use the new format, you should use something like: + + >>> conn = boto.connect_autoscale(use_block_device_types=True) + >>> config = conn.get_all_launch_configurations(names=['foo']) diff -Nru python-boto-2.20.1/docs/source/elb_tut.rst python-boto-2.29.1/docs/source/elb_tut.rst --- python-boto-2.20.1/docs/source/elb_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/elb_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -74,7 +74,7 @@ Getting Existing Load Balancers ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To retrieve any exiting load balancers: +To retrieve any existing load balancers: >>> conn.get_all_load_balancers() [LoadBalancer:load-balancer-prod, LoadBalancer:load-balancer-staging] @@ -92,7 +92,7 @@ >>> balancers = conn.get_all_load_balancers() >>> balancers[0] -[LoadBalancer:load-balancer-prod] +LoadBalancer:load-balancer-prod Creating a Load Balancer ------------------------ diff -Nru python-boto-2.20.1/docs/source/index.rst python-boto-2.29.1/docs/source/index.rst --- python-boto-2.20.1/docs/source/index.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/index.rst 2014-05-30 20:49:34.000000000 +0000 @@ -24,6 +24,7 @@ * :doc:`Elastic Compute Cloud (EC2) ` -- (:doc:`API Reference `) * :doc:`Elastic MapReduce (EMR) ` -- (:doc:`API Reference `) * :doc:`Auto Scaling ` -- (:doc:`API Reference `) + * Kinesis -- (:doc:`API Reference `) * **Content Delivery** @@ -33,6 +34,7 @@ * :doc:`DynamoDB2 ` -- (:doc:`API Reference `) -- (:doc:`Migration Guide from v1 `) * :doc:`DynamoDB ` -- (:doc:`API Reference `) + * Relational Data Services 2 (RDS) -- (:doc:`API Reference `) -- (:doc:`Migration Guide from v1 `) * :doc:`Relational Data Services (RDS) ` -- (:doc:`API Reference `) * ElastiCache -- (:doc:`API Reference `) * Redshift -- (:doc:`API Reference `) @@ -53,6 +55,7 @@ * **Application Services** + * Cloudsearch 2 -- (:doc:`API Reference `) * :doc:`Cloudsearch ` -- (:doc:`API Reference `) * Elastic 
Transcoder -- (:doc:`API Reference `) * :doc:`Simple Workflow Service (SWF) ` -- (:doc:`API Reference `) @@ -66,7 +69,7 @@ * **Networking** - * Route 53 -- (:doc:`API Reference `) + * :doc:`Route 53 ` -- (:doc:`API Reference `) * :doc:`Virtual Private Cloud (VPC) ` -- (:doc:`API Reference `) * :doc:`Elastic Load Balancing (ELB) ` -- (:doc:`API Reference `) @@ -96,6 +99,7 @@ * :doc:`Command Line Utilities ` * :doc:`Boto Config Tutorial ` * :doc:`Contributing to Boto ` +* :doc:`Evaluating Application performance with Boto logging ` * `Boto Source Repository`_ * `Boto Issue Tracker`_ * `Boto Twitter`_ @@ -115,6 +119,20 @@ .. toctree:: :titlesonly: + releasenotes/v2.29.1 + releasenotes/v2.29.0 + releasenotes/v2.28.0 + releasenotes/v2.27.0 + releasenotes/v2.26.1 + releasenotes/v2.26.0 + releasenotes/v2.25.0 + releasenotes/v2.24.0 + releasenotes/v2.23.0 + releasenotes/v2.22.1 + releasenotes/v2.22.0 + releasenotes/v2.21.2 + releasenotes/v2.21.1 + releasenotes/v2.21.0 releasenotes/v2.20.1 releasenotes/v2.20.0 releasenotes/v2.19.0 @@ -177,6 +195,7 @@ vpc_tut elb_tut s3_tut + route53_tut boto_config_tut documentation contributing @@ -184,6 +203,7 @@ support_tut dynamodb2_tut migrations/dynamodb_v1_to_v2 + migrations/rds_v1_to_v2 apps_built_on_boto ref/* releasenotes/* diff -Nru python-boto-2.20.1/docs/source/migrations/dynamodb_v1_to_v2.rst python-boto-2.29.1/docs/source/migrations/dynamodb_v1_to_v2.rst --- python-boto-2.20.1/docs/source/migrations/dynamodb_v1_to_v2.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/migrations/dynamodb_v1_to_v2.rst 2014-05-30 20:49:34.000000000 +0000 @@ -231,7 +231,7 @@ >>> from boto.dynamodb2.table import Table >>> table = Table('messages') - >>> items = table.query( + >>> items = table.query_2( ... forum_name__eq='Amazon DynamoDB', ... subject__beginswith='DynamoDB', ... 
limit=1 diff -Nru python-boto-2.20.1/docs/source/migrations/rds_v1_to_v2.rst python-boto-2.29.1/docs/source/migrations/rds_v1_to_v2.rst --- python-boto-2.20.1/docs/source/migrations/rds_v1_to_v2.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/migrations/rds_v1_to_v2.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,91 @@ +.. rds_v1_to_v2: + +=============================== +Migrating from RDS v1 to RDS v2 +=============================== + +The original ``boto.rds`` module has historically lagged quite far behind the +service (at time of writing, almost 50% of the API calls are +missing/out-of-date). To address this, the Boto core team has switched to +a generated client for RDS (``boto.rds2.layer1.RDSConnection``). + +However, this generated variant is not backward-compatible with the older +``boto.rds.RDSConnection``. This document is to help you update your code +(as desired) to take advantage of the latest API calls. + +For the duration of the document, **RDS2Connection** refers to +``boto.rds2.layer1.RDSConnection``, where **RDSConnection** refers to +``boto.rds.RDSConnection``. + + +Prominent Differences +===================== + +* The new **RDS2Connection** maps very closely to the `official API operations`_, + where the old **RDSConnection** had non-standard & inconsistent method names. +* **RDS2Connection** almost always returns a Python dictionary that maps + closely to the API output. **RDSConnection** returned Python objects. +* **RDS2Connection** is much more verbose in terms of output. Tools like + `jmespath`_ or `jsonq`_ can make handling these sometimes complex dictionaries more + manageable. + +.. _`official API operations`: http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/Welcome.html +.. _`jmespath`: https://github.com/boto/jmespath +.. 
_`jsonq`: https://github.com/edmund-huber/jsonq + + +Method Renames +============== + +Format is ``old_method_name`` -> ``new_method_name``: + +* ``authorize_dbsecurity_group`` -> ``authorize_db_security_group_ingress`` +* ``create_dbinstance`` -> ``create_db_instance`` +* ``create_dbinstance_read_replica`` -> ``create_db_instance_read_replica`` +* ``create_parameter_group`` -> ``create_db_parameter_group`` +* ``get_all_dbsnapshots`` -> ``describe_db_snapshots`` +* ``get_all_events`` -> ``describe_events`` +* ``modify_dbinstance`` -> ``modify_db_instance`` +* ``reboot_dbinstance`` -> ``reboot_db_instance`` +* ``restore_dbinstance_from_dbsnapshot`` -> ``restore_db_instance_from_db_snapshot`` +* ``restore_dbinstance_from_point_in_time`` -> ``restore_db_instance_to_point_in_time`` +* ``revoke_dbsecurity_group`` -> ``revoke_db_security_group_ingress`` + + +Parameter Changes +================= + +Many parameter names have changed between **RDSConnection** & +**RDS2Connection**. For instance, the old name for the instance identifier was +``id``, where the new name is ``db_instance_identifier``. These changes are to +ensure things map more closely to the API. + +In addition, in some cases, ordering & required-ness of parameters has changed +as well. For instance, in ``create_db_instance``, the +``engine`` parameter is now required (previously defaulted to ``MySQL5.1``) & +its position in the call has change to be before ``master_username``. + +As such, when updating your API calls, you should check the +API Reference documentation to ensure you're passing the +correct parameters. + + +Return Values +============= + +**RDSConnection** frequently returned higher-level Python objects. In contrast, +**RDS2Connection** returns Python dictionaries of the data. This will require +a bit more work to extract the necessary values. 
For example:: + + # Old + >>> instances = rds1_conn.get_all_dbinstances() + >>> inst = instances[0] + >>> inst.name + 'test-db' + + # New + >>> instances = rds2_conn.describe_db_instances() + >>> inst = instances['DescribeDBInstancesResponse']\ + ... ['DescribeDBInstancesResult']['DBInstances'][0] + >>> inst['DBName'] + 'test-db' diff -Nru python-boto-2.20.1/docs/source/rds_tut.rst python-boto-2.29.1/docs/source/rds_tut.rst --- python-boto-2.20.1/docs/source/rds_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/rds_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -8,6 +8,15 @@ from Amazon Web Services. This tutorial assumes that you have boto already downloaded and installed, and that you wish to setup a MySQL instance in RDS. +.. warning:: + + This tutorial covers the **ORIGINAL** module for RDS. + It has since been supplanted by a second major version & an + updated API complete with all service operations. The documentation for the + new version of boto's support for RDS is at + :doc:`RDS v2 `. + + Creating a Connection --------------------- The first step in accessing RDS is to create a connection to the service. diff -Nru python-boto-2.20.1/docs/source/ref/autoscale.rst python-boto-2.29.1/docs/source/ref/autoscale.rst --- python-boto-2.20.1/docs/source/ref/autoscale.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/autoscale.rst 2014-05-30 20:49:34.000000000 +0000 @@ -60,3 +60,11 @@ .. automodule:: boto.ec2.autoscale.scheduled :members: :undoc-members: + + +boto.ec2.autoscale.tag +---------------------------- + +.. automodule:: boto.ec2.autoscale.tag + :members: + :undoc-members: diff -Nru python-boto-2.20.1/docs/source/ref/cloudsearch2.rst python-boto-2.29.1/docs/source/ref/cloudsearch2.rst --- python-boto-2.20.1/docs/source/ref/cloudsearch2.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/cloudsearch2.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,54 @@ +.. 
ref-cloudsearch2 + +=========== +Cloudsearch +=========== + +boto.cloudsearch2 +----------------- + +.. automodule:: boto.cloudsearch2 + :members: + :undoc-members: + +boto.cloudsearch2.domain +------------------------ + +.. automodule:: boto.cloudsearch2.domain + :members: + :undoc-members: + +boto.cloudsearch2.layer1 +------------------------ + +.. automodule:: boto.cloudsearch2.layer1 + :members: + :undoc-members: + +boto.cloudsearch2.layer2 +------------------------ + +.. automodule:: boto.cloudsearch2.layer2 + :members: + :undoc-members: + +boto.cloudsearch2.optionstatus +------------------------------ + +.. automodule:: boto.cloudsearch2.optionstatus + :members: + :undoc-members: + +boto.cloudsearch2.search +------------------------ + +.. automodule:: boto.cloudsearch2.search + :members: + :undoc-members: + +boto.cloudsearch2.document +-------------------------- + +.. automodule:: boto.cloudsearch2.document + :members: + :undoc-members: diff -Nru python-boto-2.20.1/docs/source/ref/cloudsearch.rst python-boto-2.29.1/docs/source/ref/cloudsearch.rst --- python-boto-2.20.1/docs/source/ref/cloudsearch.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/cloudsearch.rst 2014-05-30 20:49:34.000000000 +0000 @@ -8,7 +8,7 @@ ---------------- .. automodule:: boto.cloudsearch - :members: + :members: :undoc-members: boto.cloudsearch.domain @@ -18,6 +18,13 @@ :members: :undoc-members: +boto.cloudsearch.exceptions +----------------------- + +.. automodule:: boto.cloudsearch.exceptions + :members: + :undoc-members: + boto.cloudsearch.layer1 ----------------------- @@ -52,8 +59,3 @@ .. 
automodule:: boto.cloudsearch.document :members: :undoc-members: - - - - - diff -Nru python-boto-2.20.1/docs/source/ref/index.rst python-boto-2.29.1/docs/source/ref/index.rst --- python-boto-2.20.1/docs/source/ref/index.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/index.rst 2014-05-30 20:49:34.000000000 +0000 @@ -22,6 +22,7 @@ glacier gs iam + kinesis manage mturk mws diff -Nru python-boto-2.20.1/docs/source/ref/kinesis.rst python-boto-2.29.1/docs/source/ref/kinesis.rst --- python-boto-2.20.1/docs/source/ref/kinesis.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/kinesis.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,26 @@ +.. ref-kinesis + +======= +Kinesis +======= + +boto.kinesis +------------ + +.. automodule:: boto.kinesis + :members: + :undoc-members: + +boto.kinesis.layer1 +------------------- + +.. automodule:: boto.kinesis.layer1 + :members: + :undoc-members: + +boto.kinesis.exceptions +----------------------- + +.. automodule:: boto.kinesis.exceptions + :members: + :undoc-members: diff -Nru python-boto-2.20.1/docs/source/ref/rds2.rst python-boto-2.29.1/docs/source/ref/rds2.rst --- python-boto-2.20.1/docs/source/ref/rds2.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/rds2.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,26 @@ +.. ref-rds2 + +==== +RDS2 +==== + +boto.rds2 +--------- + +.. automodule:: boto.rds2 + :members: + :undoc-members: + +boto.rds2.exceptions +-------------------- + +.. automodule:: boto.rds2.exceptions + :members: + :undoc-members: + +boto.rds2.layer1 +---------------- + +.. 
automodule:: boto.rds2.layer1 + :members: + :undoc-members: diff -Nru python-boto-2.20.1/docs/source/ref/route53.rst python-boto-2.29.1/docs/source/ref/route53.rst --- python-boto-2.20.1/docs/source/ref/route53.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/route53.rst 2014-05-30 20:49:34.000000000 +0000 @@ -13,7 +13,7 @@ :undoc-members: boto.route53.exception -------------------- +---------------------- .. automodule:: boto.route53.exception :members: @@ -27,7 +27,7 @@ :undoc-members: boto.route53.zone ------------------------- +----------------- .. automodule:: boto.route53.zone :members: diff -Nru python-boto-2.20.1/docs/source/ref/swf.rst python-boto-2.29.1/docs/source/ref/swf.rst --- python-boto-2.20.1/docs/source/ref/swf.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/ref/swf.rst 2014-05-30 20:49:34.000000000 +0000 @@ -18,6 +18,13 @@ :members: :undoc-members: +boto.swf.layer1_decisions +------------------------- + +.. automodule:: boto.swf.layer1_decisions + :members: + :undoc-members: + boto.swf.layer2 -------------------- diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.21.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.21.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.21.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.21.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,43 @@ +boto v2.21.0 +============ + +:date: 2013/12/19 + +This release adds support for the latest AWS OpsWorks, AWS Elastic Beanstalk, +Amazon DynamoDB, Amazon Elastic MapReduce (EMR), Amazon Simple Storage Service +(S3), Amazon Elastic Transcoder, AWS CloudTrail, and AWS Support APIs. It also +includes documentation and other fixes. + +.. note:: + + Although Boto now includes support for the newly announced China (Beijing) + Region, the service endpoints will not be accessible until the Region’s + limited preview is launched in early 2014. 
To find out more about the new + Region and request a limited preview account, please visit + http://www.amazonaws.cn/. + + +Features +-------- +* Add support for Elastic Transcoder pagination and new codecs (:sha:`dcb1c5a`) +* Add support for new CloudTrail calling format (:sha:`aeafe9b`) +* Update to the latest Support API (:sha:`45e1884`) +* Add support for arbitrarily large SQS messages stored in S3 via BigMessage. (:issue:`1917`, :sha:`e6cd665`) +* Add support for ``encoding_type`` to S3 (:sha:`6b2d967`) +* Add support for Elastic MapReduce tags (:issue:`1928`, :issue:`1920`, :sha:`b9749c6`, :sha:`8e4c595`) +* Add high level support for global secondary indexes in DynamoDB (:issue:`1924`, :issue:`1913`, :sha:`32dac5b`) +* Add support for Elastic Beanstalk worker environments. (:issue:`1911`, :sha:`bbd4fbf`) +* Add support for OpsWorks IAM user permissions per stack (:sha:`ac6e4e7`) +* Add support for SigV4 to S3 (:sha:`deb9e18`) +* Add support for SigV4 to EC2 (:sha:`bdebfe0`) +* Add support for SigV4 to ElastiCache (:sha:`b892b45`) + + +Bugfixes +-------- +* Add documentation describing account usage for multipart uploads in S3 (:sha:`af03d8d`) +* Update DesiredCapacity if AutoScalingGroup.desired_capacity is not None. 
(:issue:`1906`, :issue:`1906`, :issue:`1757`, :sha:`b6670ce`) +* Documentation: add Kinesis API reference (:issue:`1921`, :sha:`c169836`) +* Documentation: sriovNetSupport instance attribute (:issue:`1915`, :sha:`e1bafcc`) +* Update RDS documentation for API version: 2013-09-09 (:issue:`1914`, :sha:`fcf702a`) +* Switch all classes to new style classes which results in memory use improvements (:sha:`ca36fa2`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.21.1.rst python-boto-2.29.1/docs/source/releasenotes/v2.21.1.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.21.1.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.21.1.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,21 @@ +boto v2.21.1 +============ + +:date: 2013/12/23 + +This release is a bugfix release which corrects how the Mechanical Turk objects +work & a threading issue when using ``datetime.strptime``. + + +Bugfixes +-------- + +* Added ``cn-north-1`` to regions. (:sha:`9c89de1`) +* Fixed threading issues related to ``datetime.strptime``. (:issue:`1898`, + :sha:`2ef66c9`) +* Updated all the old-style inheritance calls. (:issue:`1918`, :issue:`1936`, + :issue:`1937`, :sha:`39a997f` & :sha:`607624f`) +* Documentation: + + * Added missed notes about the cn-north-1 region. (:sha:`738c8cb`) + * Added the C3 family of EC2 instances. (:issue:`1938`, :sha:`05b7482`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.21.2.rst python-boto-2.29.1/docs/source/releasenotes/v2.21.2.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.21.2.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.21.2.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,13 @@ +boto v2.21.2 +============ + +:date: 2013/12/24 + +This release is a bugfix release which corrects one more bug in the Mechanical +Turk objects. + + +Bugfixes +-------- + +* Fixed a missed inheritance bug in mturk. 
(:issue:`1936`, :sha:`0137f29`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.22.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.22.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.22.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.22.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,29 @@ +boto v2.22.0 +============ + +:date: 2014/01/02 + +This release updates the Auto Scaling to support the latest API, the ability +to control the response sizes in Amazon DynamoDB queries/scans & a number of +bugfixes as well. + + +Features +-------- + +* Updated Auto Scaling to support the latest API. (:sha:`9984c4f`) +* Added the ability to alter response sizes in DynamoDB queries/scans. + (:issue:`1949`, :sha:`6761b01`) + + +Bugfixes +-------- + +* Fix string instance tests. (:issue:`1959`, :sha:`ee203bf`) +* Add missing parameters to ``get_spot_price_history method``. (:issue:`1958`, + :sha:`f635474`) +* Fix unicode string parameter handling in S3Connection. (:issue:`1954`, + :issue:`1952`, :sha:`12e6b0c`) +* Fix typo in docstring for SSHClient.run. (:issue:`1953`, :sha:`5263b20`) +* Properly handle getopt long options in s3put. (:issue:`1950`, :issue:`1946`, + :sha:`cf693ff`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.22.1.rst python-boto-2.29.1/docs/source/releasenotes/v2.22.1.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.22.1.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.22.1.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,22 @@ +boto v2.22.1 +============ + +:date: 2014/01/06 + +This release fixes working with keys with special characters in them while using +Signature V4 with Amazon Simple Storage Service (S3). It also fixes a regression +in the ``ResultSet`` object, re-adding the ``nextToken`` attribute. This was +most visible from within Amazon Elastic Compute Cloud (EC2) when calling the +``get_spot_price_history`` method. 
+ +Users in the cn-north-1 region or who make active use of +``get_spot_price_history`` are recommended to upgrade. + + +Bugfixes +-------- + +* Fixed key names with special characters in S3 when using SigV4. + (:sha:`8b37180`) +* Re-added the ``nextToken`` attribute to the EC2 result set object. + (:issue:`1968`, :sha:`6928928`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.23.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.23.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.23.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.23.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,49 @@ +boto v2.23.0 +============ + +:date: 2014/01/10 + +This release adds new pagination & date range filtering to Amazon Glacier, more +support for selecting specific attributes within Amazon DynamoDB, security +tokens from environment/config variables & many bugfixes/small improvements. + + +Features +-------- + +* Added pagination & date range filtering to Glacier inventory options. + (:issue:`1977`, :sha:`402a305`) +* Added the ability to select the specific attributes to fetch in the ``scan`` + & ``get_item`` calls within DynamoDB v2. (:issue:`1945`, :issue:`1972`, + :sha:`f6451fb` & :sha:`96cd413`) +* Allow getting a security token from either an environment or configuration + variable. (:issue:``, :sha:``) +* Ported the ``has_item`` call from the original DynamoDB (v1) module to + DynamoDB v2. (:issue:`1973`, :issue:`1822`, :sha:`f96e9e3`) +* Added an ``associate_address_object`` method to EC2. (:issue:`1967`, + :issue:`1874`, :issue:`1893`, :sha:`dd6180c`) +* Added a ``download_to_fileobj`` method to Glacier,similar to the S3 call + of the same name. (:issue:`1960`, :issue:`1941`, :sha:`67266e5`) +* Added support for arbitrary ``dict`` inputs to MWS. (:issue:`1966`, + :sha:`46f193f`) + + +Bugfixes +-------- + +* Made the usage of ``is/is not`` more consistent. 
(:issue:`1930`, + :sha:`8597c54`) +* Imported ``with_statement`` for old Python versions (:issue:`1975`, + :sha:`a53a574`) +* Changed the ``Binary`` data object within DynamoDB to throw an error if an + invalid data type is used. (:issue:`1963`, :issue:`1956`, :sha:`e5d30c8`) +* Altered the integration tests to avoid connection errors to certain regions. + (:sha:`2555b8a`) +* Changed the GCS resumable upload handler to save tracker files with protection + 0600. (:sha:`7cb344c`) +* Documentation: + + * Clarified documentation around the ``list_metrics`` call in + CloudFormation. (:issue:`1962`, :sha:`c996a72`) + * Added ``Tag`` to the Autoscale API docs. (:issue:`1964`, :sha:`31118d9`) + * Updated the AWS Support documentation to the latest. (:sha:`29f9264`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.24.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.24.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.24.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.24.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,36 @@ +boto v2.24.0 +============ + +:date: 2014/01/29 + +This release adds M3 instance types to Amazon EC2, adds support for dead letter queues to Amazon Simple Queue Service (SQS), adds a single JSON file for all region and endpoint information and provides several fixes to a handful of services and documentation. Additionally, the SDK now supports using AWS Signature Version 4 with Amazon S3. + +Features +-------- +* Load region and endpoint information from a JSON file (:sha:`b9dbaad`) +* Return the x-amz-restore header with GET KEY and fix provider prefix. (:issue:`1990`, :sha:`43e8e0a`) +* Make S3 key validation optional with the ``validate`` parameter (:issue:`2013`, :issue:`1996`, :sha:`fd6b632`) +* Adding new eu-west-1 and eu-west-2 endpoints for SES. 
(:issue:`2015`, :sha:`d5ef862`, :sha:`56ba3e5`) +* Google Storage now uses new-style Python classes (:issue:`1927`, :sha:`86c9f77`) +* Add support for step summary list to Elastic MapReduce (:issue:`2011`, :sha:`d3af158`) +* Added the M3 instance types. (:issue:`2012`, :sha:`7c82f57`) +* Add credential profile configuration (:issue:`1979`, :sha:`e3ab708`) +* Add support for dead letter queues to SQS (:sha:`93c7d05`) + +Bugfixes +-------- +* Make the Lifecycle Id optional and fix prefix=None in XML generation. (:issue:`2021`, :sha:`362a04a`) +* Fix DynamoDB query limit bug (:issue:`2014`, :sha:`7ecb3f7`) +* Add documentation about the version_id behavior of Key objects. (:issue:`2026`, :sha:`b6b242c`) +* Fixed typo in Table.create example (:issue:`2023`, :sha:`d81a660`) +* Adding a license/copyright header. (:issue:`2025`, :sha:`26ded39`) +* Update the docstring for the SNS subscribe method (:issue:`2017`, :sha:`4c806de`) +* Renamed unit test with duplicate name (:issue:`2016`, :sha:`c7bd0bd`) +* Use UTC instead of local time in ``test_refresh_credentials`` (:issue:`2020`, :sha:`b5a2eaf`) +* Fix missing ``security_token`` option in some connection classes (:issue:`1989`, :issue:`1942`, :sha:`2b72f32`) +* Fix listing S3 multipart uploads with some parameter combinations (:issue:`2000`, :sha:`49045bc`) +* Fix ``elbadmin`` crash because of non-extant instances in load balancer (:issue:`2001`, :sha:`d47cc14`) +* Fix anonymous S3 fetch test case (:issue:`1988`, :issue:`1992`, :sha:`8fb1666`) +* Fix ``elbadmin`` boto import (:issue:`2002`, :sha:`674c3a6`) +* Fixing SQS tutorial to correctly describe behavior of the write operation (:issue:`1986`, :sha:`6147d86`) +* Fix various grammar mistakes (:issue:`1980`, :sha:`ada40b5`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.25.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.25.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.25.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ 
python-boto-2.29.1/docs/source/releasenotes/v2.25.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,57 @@ +boto v2.25.0 +============ + +:date: 2014/02/07 + +This release includes Amazon Route53 service and documentation updates, +preliminary log file support for Amazon Relational Database Service (RDS), as +well as various other small fixes. Also included is an opt-in to use signature +version 4 with Amazon EC2. + +**IMPORTANT** - This release also include a **SIGNIFICANT** underlying change +to the Amazon S3 ``get_bucket`` method, to addresses the blog post by AppNeta_. +We've altered the default behavior to now perform a ``HEAD`` on the bucket, in +place of the old ``GET`` behavior (which would fetch a zero-length list of +keys). + +This should reduce all users costs & should also be *mostly* +backward-compatible. **HOWEVER**, if you were previously parsing the exception +message from ``S3Connection.get_bucket``, you *will* have to change your code +(see the S3 tutorial for details). ``HEAD`` does *not* return as detailed of +error messages & while we've attempted to patch over as much of the differences +as we can, there may still be edge-cases over the prior behavior. + +.. 
_AppNeta: http://www.appneta.com/blog/s3-list-get-bucket-default/ + + +Features +-------- + +* Add support for Route53 API version 2013-04-01 (:issue:`2080`, :sha:`600dcd0`) +* Add option to opt-in for EC2 SigV4 (:issue:`2074`, :sha:`4d780bd`) +* Add Autoscale feature to get all adjustment types (:issue:`2058`, + :issue:`1538`, :sha:`b9c7e15`) +* Add Route53 unit tests (:issue:`2066`, :sha:`e859576`) +* Add a basic Route53 tutorial (:issue:`2060`, :sha:`f0ad46b`) +* Add Autoscale associated public IP to launch configuration (:issue:`2051`, + :issue:`2028`, :issue:`2029`, :sha:`c58bda6`) +* Add option to pass VPC zone identifiers as a Python list (:issue:`2047`, + :issue:`1772`, :sha:`07ef9e1`) +* Add RDS call to get all log files (:issue:`2040`, :issue:`1994`, + :sha:`925b8cb`) + + +Bugfixes +-------- + +* Changed S3 ``get_bucket`` to use ``HEAD`` in place of ``GET``. (:issue:`2078`, + :issue:`2082`, :sha:`016be83`) +* Fix EMR's describe_cluster_command. (:issue:`2034`, :sha:`1c5621e`) +* Tutorial small code fix (:issue:`2072`, :sha:`38e7db1`) +* Fix CloudFront string representation (:issue:`2069`, :sha:`885c397`) +* Route53 doc cleanup (:issue:`2059`, :sha:`d2fc38e`) +* Fix MWS parsing of GetProductCategoriesForASIN response. (:issue:`2024`, + :sha:`0af08ce`) +* Fix SQS docs for get_queue_attributes (:issue:`2061`, :sha:`1cdc326`) +* Don't insert a '?' 
in URLs unless there is a query string (:issue:`2042`, + :issue:`1943`, :sha:`c15ce60`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.26.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.26.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.26.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.26.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,59 @@ +boto v2.26.0 +============ + +:date: 2014/02/27 + +This release adds support for MFA tokens in the AWS STS ``assume_role`` & the +introduction of the ``boto.rds2`` module (which has full support for the entire +RDS API). It also includes the addition of request hooks & many bugfixes. + + +Changes +------- + +* Added support for MFA in STS AssumeRole. (:sha:`899810c`) +* Fixed how DynamoDB v2 works with Global Secondary Indexes. (:issue:`2122`, + :sha:`f602c95`) +* Add request hooks and request logger. (:issue:`2125`, :sha:`e8b20fe`) +* Don't pull the security token from the environment or config when a caller + supplies the access key and secret. (:issue:`2123`, :sha:`4df1694`) +* Read EvaluateTargetHealth from Route53 resource record set. (:issue:`2120`, + :sha:`0a97158`) +* Prevent implicit string decode in hmac-v4 handlers. (:issue:`2037`, + :issue:`2033`, :sha:`8e56a5f`) +* Updated Datapipeline to include all current regions. (:issue:`2121`, + :sha:`dff5e3e`) +* Bug fix for Google Storage generate_url authentication. (:issue:`2116`, + :issue:`2108`, :sha:`5a50932`) +* Handle JSON error responses in BotoServerError. (:issue:`2113`, :issue:`2077`, + :sha:`221085e`) +* Corrected a typo in SQS tutorial. (:issue:`2114`, :sha:`7ed41f7`) +* Add CloudFormation template capabilities support. (:issue:`2111`, + :issue:`2075`, :sha:`65a4323`) +* Add SWF layer1_decisions to docs. (:issue:`2110`, :issue:`2062`, + :sha:`6039cc9`) +* Add support for request intervals in health checks. 
(:issue:`2109`, + :sha:`660b01a`) +* Added checks for invalid regions to the ``bin`` scripts (:issue:`2107`, + :sha:`bbb9f1e`) +* Better error output for unknown region - (:issue:`2041`, :issue:`1983`, + :sha:`cd63f92`) +* Added certificate tests for CloudTrail. (:issue:`2106`, :sha:`a7e9b4c`) +* Updated Kinesis endpoints. (:sha:`7bd4b6e`) +* Finished implementation of RDS's DescribeDBLogFiles. (:issue:`2084`, + :sha:`f3c706c`) +* Added support for RDS log file downloading. (:issue:`2086`, :issue:`1993`, + :sha:`4c51841`) +* Added some unit tests for CloudFront. (:issue:`2076`, :sha:`6c46b1d`) +* GS should ignore restore_headers as they are never set. (:issue:`2067`, + :sha:`f02aeb3`) +* Update CloudFormation to support the latest API. (:issue:`2101`, + :sha:`ea1b1b6`) +* Added Route53 health checks. (:issue:`2054`, :sha:`9028f7d`) +* Merge branch 'rds2' into develop Fixes #2097. (:issue:`2097`, :sha:`6843c16`) +* Fix Param class convert method (:issue:`2094`, :sha:`5cd4598`) +* Added support for Route53 aliasing. (:issue:`2096`, :sha:`df5fa40`) +* Removed the dependence on ``example.com`` within the Route53 tests. + (:issue:`2098`, :sha:`6ce9e0f`) +* Fixed ``has_item`` support in DynamoDB v2. (:issue:`2090`, :sha:`aada5d3`) +* Fix a little typo bug in the S3 tutorial. (:issue:`2088`, :sha:`c091d27`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.26.1.rst python-boto-2.29.1/docs/source/releasenotes/v2.26.1.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.26.1.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.26.1.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,14 @@ +boto v2.26.1 +============ + +:date: 2014/03/03 + +This release fixes an issue with the newly-added ``boto.rds2`` module when +trying to use ``boto.connect_rds2``. Parameters were not being passed correctly, +which would cause an immediate error. + + +Changes +------- + +* Fixed ``boto.connect_rds2`` to use kwargs. 
(:sha:`3828ece`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.27.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.27.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.27.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.27.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,33 @@ +boto v2.27.0 +============ + +:date: 2014/03/06 + +This release adds support for configuring access logs on Elastic Load Balancing +(including what Amazon Simple Storage Service (S3) bucket to use & how +frequently logs should be added to the bucket), adds request hook documentation +& a host of doc updates/bugfixes. + + +Changes +------- + +* Added support for ``AccessLog`` in ELB (:issue:`2150`, :sha:`7aa35ea`) +* Added better BlockDeviceType deserialization in Autoscaling. (:issue:`2149`, + :sha:`04d29a5`) +* Updated CloudFormation documentation (:issue:`2147`, :sha:`2535aca`) +* Updated Kinesis documentation (:issue:`2146`, :sha:`01425dc`) +* Add optional bucket tags to `lss3` output. (:issue:`2132`, :sha:`0f35924`) +* Fix getting instance types for Eucalyptus 4.0. 
(:issue:`2118`, :sha:`18dc07d`) +* Fixed how quoted strings are handled in SigV4 (:issue:`2142`, :sha:`2467547`) +* Use system supplied certs without a bundle file (:issue:`2139`, + :sha:`70d15b8`) +* Fixed incorrect test failures in EC2 ``trim_snapshots`` (:sha:`1fa9df7`) +* Raise any exceptions that are tagSet not found (:sha:`56d7d3e`) +* Added request hook docs (:issue:`2129`, :sha:`64eedce`) +* Fixed Route53 ``alias-healthcheck`` (:issue:`2126`, :sha:`141077f`) +* Fixed Elastic IP association in EC2 (:issue:`2131`, :issue:`1310`, + :sha:`d75fdfa`) +* Fixed builds on Travis for installing dependencies (:sha:`5e84e30`) +* Support printing tags on buckets when listing buckets (:sha:`c42a5dd`) +* PEP8/pyflakes/(some)pylint (:sha:`149175e`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.28.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.28.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.28.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.28.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,38 @@ +boto v2.28.0 +============ + +:date: 2014/05/08 + +This release adds support for Amazon SQS message attributes, Amazon DynamoDB query filters and enhanced conditional operators, adds support for the new Amazon CloudSearch 2013-01-01 API and includes various features and fixes for Amazon Route 53, Amazon EC2, Amazon Elastic Beanstalk, Amazon Glacier, AWS Identity and Access Management (IAM), Amazon S3, Mechanical Turk and MWS. + +Changes +------- +* Add support for SQS message attributes. (:issue:`2257`, :sha:`a04ca92`) +* Update DynamoDB to support query filters. (:issue:`2242`, :sha:`141eb71`) +* Implement new Cloudsearch API 2013-01-01 as cloudsearch2 module (:sha:`b0ababa`) +* Miscellaneous improvements to the MTurk CLI. (:issue:`2188`, :sha:`c213ff1`) +* Update MWS to latest API version and adds missing API calls. 
(:issue:`2203`, :issue:`2201`, :sha:`8adf720`, :sha:`8d0a6a8`) +* Update EC2 `register_image` to expose an option which sets whether an instance store is deleted on termination. The default value is left as-is. (:sha:`d295ee9`) +* Correct typo "possile" --> "possible". (:issue:`2196`, :sha:`d228352`) +* Update Boto configuration tutorial (:issue:`2191`, :sha:`f2a7a08`) +* Clarify that MTurkConnection.get_assignments attributes are actually strings. (:issue:`2187`, :issue:`2176`, :sha:`075636b`) +* Fix EC2 documentation typo (:issue:`2178`, :sha:`2627843`) +* Add support for ELB Connection Draining attribute. (:issue:`2174`, :issue:`2173`, :sha:`78fa43c`) +* Add support for setting failure threshold for Route53 health checks. (:issue:`2171`, :issue:`2170`, :sha:`15b812f`) +* Fix specification of Elastic Beanstalk tier parameter. (:issue:`2168`, :sha:`4492e86`) +* Fixed part of roboto for euca2ools. (:issue:`2166`, :issue:`1730`, :sha:`63b7a34`) +* Fixed removing policies from listeners. (:issue:`2165`, :issue:`1708`, :sha:`e5a2d9b`) +* Reintroduced the ``reverse`` fix for DDB. (:issue:`2163`, :sha:`70ec722`) +* Several fixes to DynamoDB describe calls. (:issue:`2161`, :issue:`1649`, :issue:`1663`, :sha:`84fb748`) +* Fixed how ``reverse`` works in DynamoDBv2. (:issue:`2160`, :issue:`2070`, :issue:`2115`, :sha:`afdd805`) +* Update Kinesis exceptions (:issue:`2159`, :issue:`2153`, :sha:`22c6751`) +* Fix ECS problem using new-style classes (:issue:`2103`, :sha:`dc466c7`) +* Add support for passing region info from SWF layer2 to layer1 (:issue:`2137`, :sha:`0dc8ce6`) +* Handle plus signs in S3 metadata (:issue:`2145`, :sha:`c2a0f95`) +* Fix Glacier vault date parsing (:issue:`2158`, :sha:`9e7b132`) +* Documentation fix. (:issue:`2156`, :sha:`7592a58`) +* Fix Route53 evaluate target health bug. (:issue:`2157`, :sha:`398bb62`) +* Removing obselete core directory. (:issue:`1987`, :sha:`8e83292`) +* Improve IAM behavior in the cn-north-1 region. 
(:issue:`2152`, :sha:`4050e70`) +* Add SetIdentityFeedbackForwardingEnabled and SetIdentityNotificationTopic for SES. (:issue:`2130`, :issue:`2128`, :sha:`83002d5`) +* Altered Route53 bin script to use UPSERT rather than CREATE. (:issue:`2151`, :sha:`2cd20e7`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.29.0.rst python-boto-2.29.1/docs/source/releasenotes/v2.29.0.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.29.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.29.0.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,25 @@ +boto v2.29.0 +============ + +:date: 2014/05/29 + +This release adds support for the AWS shared credentials file, adds support for Amazon Elastic Block Store (EBS) encryption, and contains a handful of fixes for Amazon EC2, AWS CloudFormation, AWS CloudWatch, AWS CloudTrail, Amazon DynamoDB and Amazon Relational Database Service (RDS). It also includes fixes for Python wheel support. + +A bug has been fixed such that a new exception is thrown when a profile name is explicitly passed either via code (``profile="foo"``) or an environment variable (``AWS_PROFILE=foo``) and that profile does not exist in any configuration file. Previously this was silently ignored, and the default credentials would be used without informing the user. + +Changes +------- +* Added support for shared credentials file. (:issue:`2292`, :sha:`d5ed49f`) +* Added support for EBS encryption. (:issue:`2282`, :sha:`d85a449`) +* Added GovCloud CloudFormation endpoint. (:issue:`2297`, :sha:`0f75fb9`) +* Added new CloudTrail endpoints to endpoints.json. (:issue:`2269`, :sha:`1168580`) +* Added 'name' param to documentation of ELB LoadBalancer. (:issue:`2291`, :sha:`86e1174`) +* Fix typo in ELB docs. (:issue:`2294`, :sha:`37aaa0f`) +* Fix typo in ELB tutorial. (:issue:`2290`, :sha:`40a758a`) +* Fix OpsWorks ``connect_to_region`` exception. 
(:issue:`2288`, :sha:`26729c7`) +* Fix timezones in CloudWatch date range example. (:issue:`2285`, :sha:`138a6d0`) +* Fix description of param tags into ``rds2.create_db_subnet_group``. (:issue:`2279`, :sha:`dc1037f`) +* Fix the incorrect name of a test case. (:issue:`2273`, :sha:`ee195a1`) +* Fix "consistent" argument to ``boto.dynamodb2.table.Table.batch_get``. (:issue:`2272`, :sha:`c432b09`) +* Update the wheel to be python 2 compatible only. (:issue:`2286`, :sha:`6ad0b75`) +* Crate.io is no longer a package index. (:issue:`2289`, :sha:`7f23de0`) diff -Nru python-boto-2.20.1/docs/source/releasenotes/v2.29.1.rst python-boto-2.29.1/docs/source/releasenotes/v2.29.1.rst --- python-boto-2.20.1/docs/source/releasenotes/v2.29.1.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/releasenotes/v2.29.1.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,11 @@ +boto v2.29.1 +============ + +:date: 2014/05/30 + +This release fixes a critical bug when the provider is not set to ``aws``, e.g. for Google Storage. It also fixes a problem with connection pooling in Amazon CloudSearch. + +Changes +------- +* Fix crash when provider is google. (:issue:`2302`, :sha:`33329d5888`) +* Fix connection pooling issue with CloudSearch (:sha:`82e83be12a`) diff -Nru python-boto-2.20.1/docs/source/request_hook_tut.rst python-boto-2.29.1/docs/source/request_hook_tut.rst --- python-boto-2.20.1/docs/source/request_hook_tut.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/request_hook_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,61 @@ +.. _request_hook_tut.rst: + +====================================== +An Introduction to boto's request hook +====================================== + +This tutorial shows you how to use the request hook for data gathering. + +It is often important to measure things we do as developers to better +understand application performance and the interactions between components +of the system. 
Boto plays a key role in some of those interactions as any +client library would. + +We'll go over how to use the request hook to do some simple request logging. + +Creating a connection +--------------------- + +For this example, let's use the EC2 interface as an example. Any connection +will work (IAM, SQS, etc..):: + + >>> from boto import ec2 + >>> conn = ec2.connect_to_region('us-west-2') + +You will be using this conn object for the remainder of the tutorial to send +commands to EC2. + +Adding your own hook +-------------------- + +The hook interface is defined in boto.utils.RequestHook +The method signature looks like:: + + def handle_request_data(self, request, response, error=False): + +In boto.requestlog.py, there is an implementation of this interface which +is written to handle multiple threads sending data to a single log +writing thread. Examining this file, you'll see a log file, queue and thread +are created, then as requests are made, the handle_request_data() method is +called. It extracts data from the request and response object to create a log +message. That's inserted into the queue and handled by the _request_log_worker +thread. + +One thing to note is that the boto request object has an additional value +"start_time", which is a datetime.now() as of the time right before the +request was issued. This can be used along with the current time (after the +request) to calculate the duration of the request. + +To add this logger to your connection:: + + >>> from boto.requestlog import RequestLogger + >>> conn.set_request_hook(RequestLogger()) + +That's all you need to do! 
Now, if you make a request, like:: + + >>> conn.get_all_volumes() + +The log message produced might look something like this:: + + '2014-02-26 21:38:27', '200', '0.791542', '592', 'DescribeVolumes' + diff -Nru python-boto-2.20.1/docs/source/route53_tut.rst python-boto-2.29.1/docs/source/route53_tut.rst --- python-boto-2.20.1/docs/source/route53_tut.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/route53_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,87 @@ +.. _route53_tut.rst: + +=========================================== +An Introduction to boto's Route53 interface +=========================================== + +This tutorial focuses on the boto interface to Route53 from Amazon Web +Services. This tutorial assumes that you have already downloaded and installed +boto. + +Route53 is a Domain Name System (DNS) web service. It can be used to route +requests to services running on AWS such as EC2 instances or load balancers, as +well as to external services. Route53 also allows you to have automated checks +to send requests where you require them. + +In this tutorial, we will be setting up our services for *example.com*. + +Creating a connection +--------------------- + +To start using Route53 you will need to create a connection to the service as +normal: + +>>> import boto.route53 +>>> conn = boto.route53.connect_to_region('us-west-2') + +You will be using this conn object for the remainder of the tutorial to send +commands to Route53. + +Working with domain names +------------------------- + +You can manipulate domains through a zone object. For example, you can create a +domain name: + +>>> zone = conn.create_zone("example.com.") + +Note that trailing dot on that domain name is significant. This is known as a +fully qualified domain name (`FQDN `_). 
+ +>>> zone + + +You can also retrieve all your domain names: + +>>> conn.get_zones() +[] + +Or you can retrieve a single domain: + +>>> conn.get_zone("example.com.") + + +Finally, you can retrieve the list of nameservers that AWS has setup for this +domain name as follows: + +>>> zone.get_nameservers() +[u'ns-1000.awsdns-42.org.', u'ns-1001.awsdns-30.com.', u'ns-1002.awsdns-59.net.', u'ns-1003.awsdns-09.co.uk.'] + +Once you have finished configuring your domain name, you will need to change +your nameservers at your registrar to point to those nameservers for Route53 to +work. + +Setting up dumb records +----------------------- + +You can also add, update and delete records on a zone: + +>>> status = a.add_record("MX", "example.com.", "10 mail.isp.com") + +When you send a change request through, the status of the update will be +*PENDING*: + +>>> status + + +You can call the API again and ask for the current status as follows: + +>>> status.update() +'INSYNC' + +>>> status + + +When the status has changed to *INSYNC*, the change has been propagated to +remote servers + diff -Nru python-boto-2.20.1/docs/source/s3_tut.rst python-boto-2.29.1/docs/source/s3_tut.rst --- python-boto-2.20.1/docs/source/s3_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/s3_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -143,6 +143,24 @@ to and from S3 so you should be able to send and receive large files without any problem. +When fetching a key that has already exists, you have two options. If you're +uncertain whether a key exists (or if you need the metadata set on it, you can +call ``Bucket.get_key(key_name_here)``. However, if you're sure a key already +exists within a bucket, you can skip the check for a key on the server. + +:: + + >>> import boto + >>> c = boto.connect_s3() + >>> b = c.get_bucket('mybucket') # substitute your bucket name here + + # Will hit the API to check if it exists. 
+ >>> possible_key = b.get_key('mykey') # substitute your key name here + + # Won't hit the API. + >>> key_we_know_is_there = b.get_key('mykey', validate=False) + + Accessing A Bucket ------------------ @@ -150,13 +168,33 @@ >>> mybucket = conn.get_bucket('mybucket') # Substitute in your bucket name >>> mybucket.list() - >> nonexistent = conn.get_bucket('i-dont-exist-at-all', validate=False) +.. versionchanged:: 2.25.0 +.. warning:: + + If ``validate=False`` is passed, no request is made to the service (no + charge/communication delay). This is only safe to do if you are **sure** + the bucket exists. + + If the default ``validate=True`` is passed, a request is made to the + service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched + a list of keys (but with a max limit set to ``0``, always returning an empty + list) in the bucket (& included better error messages), at an + increased expense. As of Boto v2.25.0, this now performs a HEAD request + (less expensive but worse error messages). + + If you were relying on parsing the error message before, you should call + something like:: + + bucket = conn.get_bucket('', validate=False) + bucket.get_all_keys(maxkeys=0) + If the bucket does not exist, a ``S3ResponseError`` will commonly be thrown. If you'd rather not deal with any exceptions, you can use the ``lookup`` method.:: @@ -166,6 +204,7 @@ ... No such bucket! + Deleting A Bucket ----------------- diff -Nru python-boto-2.20.1/docs/source/sqs_tut.rst python-boto-2.29.1/docs/source/sqs_tut.rst --- python-boto-2.20.1/docs/source/sqs_tut.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/docs/source/sqs_tut.rst 2014-05-30 20:49:34.000000000 +0000 @@ -16,7 +16,7 @@ >>> import boto.sqs >>> conn = boto.sqs.connect_to_region( ... "us-west-2", - ... aws_access_key_id=', + ... aws_access_key_id='', ... 
aws_secret_access_key='') At this point the variable conn will point to an SQSConnection object in the @@ -107,12 +107,32 @@ >>> from boto.sqs.message import Message >>> m = Message() >>> m.set_body('This is my first message.') ->>> status = q.write(m) +>>> q.write(m) -The write method returns a True if everything went well. If the write -didn't succeed it will either return a False (meaning SQS simply chose -not to write the message for some reason) or an exception if there was -some sort of problem with the request. +The write method will return the ``Message`` object. The ``id`` and +``md5`` attribute of the ``Message`` object will be updated with the +values of the message that was written to the queue. + +Arbitrary message attributes can be defined by setting a simple dictionary +of values on the message object:: + +>>> m = Message() +>>> m.message_attributes = { + "name1": { + "data_type": "String", + "string_value": "I am a string" + }, + "name2": { + "data_type": "Number", + "string_value": "12" + } +} + +Note that by default, these arbitrary attributes are not returned when +you request messages from a queue. Instead, you must request them via +the ``message_attributes`` parameter (see below). + +If the message cannot be written an ``SQSError`` exception will be raised. Writing Messages (Custom Format) -------------------------------- @@ -135,7 +155,7 @@ >>> q.set_message_class(MyMessage) >>> m = MyMessage() >>> m.set_body('This is my first message.') ->>> status = q.write(m) +>>> q.write(m) where MyMessage is the class definition for your message class. Your message class should subclass the boto Message because there is a small @@ -205,6 +225,19 @@ >>> m.get_body() u'This is my first message' +Reading Message Attributes +-------------------------- +By default, no arbitrary message attributes are returned when requesting +messages. 
You can change this behavior by specifying the names of attributes +you wish to have returned:: + +>>> rs = queue.get_messages(message_attributes=['name1', 'name2']) +>>> print rs[0].message_attributes['name1']['string_value'] +'I am a string' + +A special value of ``All`` or ``.*`` may be passed to return all available +message attributes. + Deleting Messages and Queues ---------------------------- As stated above, messages are never deleted by the queue unless explicitly told to do so. diff -Nru python-boto-2.20.1/MANIFEST.in python-boto-2.29.1/MANIFEST.in --- python-boto-2.20.1/MANIFEST.in 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/MANIFEST.in 2014-05-30 20:49:34.000000000 +0000 @@ -3,6 +3,7 @@ include boto/file/README include .gitignore include pylintrc +include boto/endpoints.json include boto/pyami/copybot.cfg include boto/services/sonofmmm.cfg include boto/mturk/test/*.doctest diff -Nru python-boto-2.20.1/README.rst python-boto-2.29.1/README.rst --- python-boto-2.20.1/README.rst 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/README.rst 2014-05-30 20:49:34.000000000 +0000 @@ -1,15 +1,15 @@ #### boto #### -boto 2.20.1 +boto 2.29.1 -Released: 13-December-2013 +Released: 30-May-2014 .. image:: https://travis-ci.org/boto/boto.png?branch=develop :target: https://travis-ci.org/boto/boto .. 
image:: https://pypip.in/d/boto/badge.png - :target: https://crate.io/packages/boto/ + :target: https://pypi.python.org/pypi/boto/ ************ Introduction @@ -108,15 +108,15 @@ :: - $ pip install boto + $ pip install boto Install from source: :: - $ git clone git://github.com/boto/boto.git - $ cd boto - $ python setup.py install + $ git clone git://github.com/boto/boto.git + $ cd boto + $ python setup.py install ********** ChangeLogs diff -Nru python-boto-2.20.1/scripts/git-release-notes.py python-boto-2.29.1/scripts/git-release-notes.py --- python-boto-2.20.1/scripts/git-release-notes.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/scripts/git-release-notes.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +from __future__ import print_function + +import datetime +import re +import subprocess + +RELEASE = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+') +ISSUE = re.compile(r'#([0-9]+)') +REVLIST = 'git rev-list develop --abbrev-commit --format="parents %p%n%B%n~~~" --max-count=200 develop' +TEMPLATE = """ +boto v{version} +=========== + +:date: {date} + +Description goes here. + + +Changes +------- +{changes} +""" + +revisions = subprocess.check_output(REVLIST, shell=True, stderr=subprocess.STDOUT) + +commit_list = [] +for hunk in revisions.split('~~~')[:-1]: + lines = hunk.strip().splitlines() + commit = lines[0].split(' ', 1)[1] + parents = lines[1].split(' ', 1)[1].split(' ') + message = ' '.join(lines[2:]) + + #print(commit, parents) + + if RELEASE.search(message): + print('Found release commit, stopping:') + print(message) + break + + if len(parents) > 1: + commit_list.append([commit, message]) + +removals = [ + re.compile(r'merge pull request #[0-9]+ from [a-z0-9/_-]+', re.I), + re.compile(r"merge branch '[a-z0-9/_-]+' into [a-z0-9/_-]+", re.I), + re.compile(r'fix(es)? 
[#0-9, ]+.?', re.I) +] + +changes = '' +for commit, message in commit_list: + append = [] + issues = set() + for issue in ISSUE.findall(message): + if issue not in issues: + append.append(':issue:`{issue}`'.format(issue=issue)) + issues.add(issue) + append.append(':sha:`{commit}`'.format(commit=commit)) + append = ' (' + ', '.join(append) + ')' + + original = message + for removal in removals: + message = removal.sub('', message) + + message = message.strip() + + if not message: + message = original.strip() + + changes += '* ' + message + append + '\n' + +print(TEMPLATE.format( + version='?.?.?', + date=datetime.datetime.now().strftime('%Y/%m/%d'), + changes=changes +)) diff -Nru python-boto-2.20.1/scripts/rebuild_endpoints.py python-boto-2.29.1/scripts/rebuild_endpoints.py --- python-boto-2.20.1/scripts/rebuild_endpoints.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/scripts/rebuild_endpoints.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,54 @@ +from __future__ import print_function + +import json +from pyquery import PyQuery as pq +import requests + + +class FetchError(Exception): + pass + + +def fetch_endpoints(): + # We utilize what the Java SDK publishes as a baseline. + resp = requests.get('https://raw2.github.com/aws/aws-sdk-java/master/src/main/resources/etc/regions.xml') + + if int(resp.status_code) != 200: + raise FetchError("Failed to fetch the endpoints. Got {0}: {1}".format( + resp.status, + resp.body + )) + + return resp.text + +def parse_xml(raw_xml): + return pq(raw_xml, parser='xml') + + +def build_data(doc): + data = {} + + # Run through all the regions. These have all the data we need. 
+ for region_elem in doc('Regions').find('Region'): + region = pq(region_elem, parser='xml') + region_name = region.find('Name').text() + + for endp in region.find('Endpoint'): + service_name = endp.find('ServiceName').text + endpoint = endp.find('Hostname').text + + data.setdefault(service_name, {}) + data[service_name][region_name] = endpoint + + return data + + +def main(): + raw_xml = fetch_endpoints() + doc = parse_xml(raw_xml) + data = build_data(doc) + print(json.dumps(data, indent=4, sort_keys=True)) + + +if __name__ == '__main__': + main() diff -Nru python-boto-2.20.1/setup.cfg python-boto-2.29.1/setup.cfg --- python-boto-2.20.1/setup.cfg 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/setup.cfg 2014-05-30 20:49:34.000000000 +0000 @@ -1,2 +1,2 @@ -[wheel] -universal = 1 +[bdist_wheel] +python-tag = py2 diff -Nru python-boto-2.20.1/setup.py python-boto-2.29.1/setup.py --- python-boto-2.20.1/setup.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/setup.py 2014-05-30 20:49:34.000000000 +0000 @@ -75,8 +75,12 @@ "boto.beanstalk", "boto.datapipeline", "boto.elasticache", "boto.elastictranscoder", "boto.opsworks", "boto.redshift", "boto.dynamodb2", "boto.support", "boto.cloudtrail", - "boto.directconnect", "boto.kinesis"], - package_data = {"boto.cacerts": ["cacerts.txt"]}, + "boto.directconnect", "boto.kinesis", "boto.rds2", + "boto.cloudsearch2"], + package_data = { + "boto.cacerts": ["cacerts.txt"], + "boto": ["endpoints.json"], + }, license = "MIT", platforms = "Posix; MacOS X; Windows", classifiers = ["Development Status :: 5 - Production/Stable", diff -Nru python-boto-2.20.1/tests/integration/cloudformation/test_connection.py python-boto-2.29.1/tests/integration/cloudformation/test_connection.py --- python-boto-2.20.1/tests/integration/cloudformation/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/cloudformation/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -105,6 +105,18 @@ 
template_body=json.dumps(BASIC_EC2_TEMPLATE)) self.addCleanup(self.connection.delete_stack, self.stack_name) + # A newly created stack should have events + events = self.connection.describe_stack_events(self.stack_name) + self.assertTrue(events) + + # No policy should be set on the stack by default + policy = self.connection.get_stack_policy(self.stack_name) + self.assertEqual(None, policy) + + # Our new stack should show up in the stack list + stacks = self.connection.describe_stacks() + self.assertEqual(self.stack_name, stacks[0].stack_name) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/integration/cloudsearch2/__init__.py python-boto-2.29.1/tests/integration/cloudsearch2/__init__.py --- python-boto-2.20.1/tests/integration/cloudsearch2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/cloudsearch2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff -Nru python-boto-2.20.1/tests/integration/cloudsearch2/test_cert_verification.py python-boto-2.29.1/tests/integration/cloudsearch2/test_cert_verification.py --- python-boto-2.20.1/tests/integration/cloudsearch2/test_cert_verification.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/cloudsearch2/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudsearch2 + + +class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudsearch = True + regions = boto.cloudsearch2.regions() + + def sample_service_call(self, conn): + conn.describe_domains() diff -Nru python-boto-2.20.1/tests/integration/cloudsearch2/test_layers.py python-boto-2.29.1/tests/integration/cloudsearch2/test_layers.py --- python-boto-2.20.1/tests/integration/cloudsearch2/test_layers.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/cloudsearch2/test_layers.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,79 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Tests for Layer1 of Cloudsearch +""" +import time + +from tests.unit import unittest +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearch2.layer2 import Layer2 +from boto.regioninfo import RegionInfo + + +class CloudSearchLayer1Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer1Test, self).setUp() + self.layer1 = CloudSearchConnection() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + resp = self.layer1.create_domain(self.domain_name) + + resp = (resp['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.addCleanup(self.layer1.delete_domain, self.domain_name) + self.assertTrue(resp.get('Created', False)) + + +class CloudSearchLayer2Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer2Test, self).setUp() + self.layer2 = Layer2() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + domain = self.layer2.create_domain(self.domain_name) + self.addCleanup(domain.delete) + self.assertTrue(domain.created, False) + self.assertEqual(domain.domain_name, self.domain_name) + + def test_initialization_regression(self): + us_west_2 = RegionInfo( + name='us-west-2', + endpoint='cloudsearch.us-west-2.amazonaws.com' + ) + self.layer2 = Layer2( + region=us_west_2, + host='cloudsearch.us-west-2.amazonaws.com' + ) + self.assertEqual( + self.layer2.layer1.host, + 'cloudsearch.us-west-2.amazonaws.com' + ) diff -Nru python-boto-2.20.1/tests/integration/cloudtrail/test_cert_verification.py python-boto-2.29.1/tests/integration/cloudtrail/test_cert_verification.py --- python-boto-2.20.1/tests/integration/cloudtrail/test_cert_verification.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/cloudtrail/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
+# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudtrail + + +class CloudTrailCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudtrail = True + regions = boto.cloudtrail.regions() + + def sample_service_call(self, conn): + conn.describe_trails() diff -Nru python-boto-2.20.1/tests/integration/datapipeline/test_cert_verification.py python-boto-2.29.1/tests/integration/datapipeline/test_cert_verification.py --- python-boto-2.20.1/tests/integration/datapipeline/test_cert_verification.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/datapipeline/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.datapipeline + + +class DatapipelineCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + datapipeline = True + regions = boto.datapipeline.regions() + + def sample_service_call(self, conn): + conn.list_pipelines() diff -Nru python-boto-2.20.1/tests/integration/dynamodb2/forum_test_data.json python-boto-2.29.1/tests/integration/dynamodb2/forum_test_data.json --- python-boto-2.20.1/tests/integration/dynamodb2/forum_test_data.json 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/dynamodb2/forum_test_data.json 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,50 @@ +[ + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T12:30:54", + "body": "Forum poll: What's your favorite chiptune band & why?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "jane", + "posted_on": "2013-12-24T12:35:40", + "body": "I'd definitely go with POWERLIFTER. Love the use of LSDJ & vocals." + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T13:45:30", + "body": "Hm, I hadn't heard of them before. Will give a listen.\n\nMy favorite is definitely D&D Sluggers so far." + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T14:15:14", + "body": "Oh man, POWERLIFTER is really good. Do they have any more albums than the first one?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "jane", + "posted_on": "2013-12-24T14:25:33", + "body": "Yeah, check out their site. The second album has been out for a bit & is just as good." + }, + { + "thread": "Help with compression?", + "posted_by": "jane", + "posted_on": "2013-12-24T14:26:51", + "body": "I'm working on my latest & having some trouble. 
I've got compression on my drum track but I still can't keep the drum for muddying the bass line without losing clarity on the hats. :( Help?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T15:22:22", + "body": "Thanks for the tip! I'll have to check it out!" + }, + { + "thread": "Help with compression?", + "posted_by": "joe", + "posted_on": "2013-12-24T15:26:06", + "body": "Have you tried using side-chaining the compression? That'll allow the bass' input to control the volume of the drums based on when it's playing." + } +] diff -Nru python-boto-2.20.1/tests/integration/dynamodb2/test_highlevel.py python-boto-2.29.1/tests/integration/dynamodb2/test_highlevel.py --- python-boto-2.20.1/tests/integration/dynamodb2/test_highlevel.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/dynamodb2/test_highlevel.py 2014-05-30 20:49:34.000000000 +0000 @@ -25,15 +25,22 @@ """ from __future__ import with_statement +import os import time from tests.unit import unittest from boto.dynamodb2 import exceptions -from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex +from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex, + GlobalKeysOnlyIndex, GlobalIncludeIndex) from boto.dynamodb2.items import Item from boto.dynamodb2.table import Table from boto.dynamodb2.types import NUMBER +try: + import json +except ImportError: + import simplejson as json + class DynamoDBv2Test(unittest.TestCase): dynamodb = True @@ -102,6 +109,14 @@ time.sleep(5) + # Does it exist? It should? + self.assertTrue(users.has_item(username='jane', friend_count=3)) + # But this shouldn't be there... + self.assertFalse(users.has_item( + username='mrcarmichaeljones', + friend_count=72948 + )) + # Test getting an item & updating it. # This is the "safe" variant (only write if there have been no # changes). 
@@ -204,7 +219,7 @@ self.assertEqual(serverside_sadie['first_name'], 'Sadie') # Test the eventually consistent query. - results = users.query( + results = users.query_2( username__eq='johndoe', last_name__eq='Doe', index='LastNameIndex', @@ -216,9 +231,19 @@ self.assertTrue(res['username'] in ['johndoe',]) self.assertEqual(res.keys(), ['username']) + # Ensure that queries with attributes don't return the hash key. + results = users.query_2( + username__eq='johndoe', + friend_count__eq=4, + attributes=('first_name',) + ) + + for res in results: + self.assertTrue(res['first_name'] in ['John',]) + self.assertEqual(res.keys(), ['first_name']) # Test the strongly consistent query. - c_results = users.query( + c_results = users.query_2( username__eq='johndoe', last_name__eq='Doe', index='LastNameIndex', @@ -229,6 +254,18 @@ for res in c_results: self.assertTrue(res['username'] in ['johndoe',]) + # Test a query with query filters + results = users.query_2( + username__eq='johndoe', + query_filter={ + 'first_name__beginswith': 'J' + }, + attributes=('first_name',) + ) + + for res in results: + self.assertTrue(res['first_name'] in ['John']) + # Test scans without filters. all_users = users.scan(limit=7) self.assertEqual(all_users.next()['username'], 'bob') @@ -297,7 +334,7 @@ username__eq='johndoe' ) # But it shouldn't break on more complex tables. - res = users.query(username__eq='johndoe') + res = users.query_2(username__eq='johndoe') # Test putting with/without sets. mau5_created = users.put_item(data={ @@ -317,6 +354,16 @@ }) self.assertTrue(penny_created) + # Test attributes. + mau5 = users.get_item( + username='mau5', + friend_count=2, + attributes=['username', 'first_name'] + ) + self.assertEqual(mau5['username'], 'mau5') + self.assertEqual(mau5['first_name'], 'dead') + self.assertTrue('last_name' not in mau5) + def test_unprocessed_batch_writes(self): # Create a very limited table w/ low throughput. 
users = Table.create('slow_users', schema=[ @@ -343,3 +390,254 @@ # Post-__exit__, they should all be gone. self.assertEqual(len(batch._unprocessed), 0) + + def test_gsi(self): + users = Table.create('gsi_users', schema=[ + HashKey('user_id'), + ], throughput={ + 'read': 5, + 'write': 3, + }, + global_indexes=[ + GlobalKeysOnlyIndex('StuffIndex', parts=[ + HashKey('user_id') + ], throughput={ + 'read': 2, + 'write': 1, + }), + ]) + self.addCleanup(users.delete) + + # Wait for it. + time.sleep(60) + + users.update( + throughput={ + 'read': 3, + 'write': 4 + }, + global_indexes={ + 'StuffIndex': { + 'read': 1, + 'write': 2 + } + } + ) + + # Wait again for the changes to finish propagating. + time.sleep(150) + + def test_gsi_with_just_hash_key(self): + # GSI allows for querying off of different keys. This is behavior we + # previously disallowed (due to standard & LSI queries). + # See https://forums.aws.amazon.com/thread.jspa?threadID=146212&tstart=0 + users = Table.create('gsi_query_users', schema=[ + HashKey('user_id') + ], throughput={ + 'read': 5, + 'write': 3, + }, + global_indexes=[ + GlobalIncludeIndex('UsernameIndex', parts=[ + HashKey('username'), + ], includes=['user_id', 'username'], throughput={ + 'read': 3, + 'write': 1, + }) + ]) + self.addCleanup(users.delete) + + # Wait for it. + time.sleep(60) + + users.put_item(data={ + 'user_id': '7', + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + }) + users.put_item(data={ + 'user_id': '24', + 'username': 'alice', + 'first_name': 'Alice', + 'last_name': 'Expert', + }) + users.put_item(data={ + 'user_id': '35', + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + }) + + # Try the main key. Should be fine. + rs = users.query_2( + user_id__eq='24' + ) + results = sorted([user['username'] for user in rs]) + self.assertEqual(results, ['alice']) + + # Now try the GSI. Also should work. 
+ rs = users.query_2( + username__eq='johndoe', + index='UsernameIndex' + ) + results = sorted([user['username'] for user in rs]) + self.assertEqual(results, ['johndoe']) + + def test_query_with_limits(self): + # Per the DDB team, it's recommended to do many smaller gets with a + # reduced page size. + # Clamp down the page size while ensuring that the correct number of + # results are still returned. + posts = Table.create('posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. + test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the reduced page size. + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + max_page_size=2 + ) + + all_posts = list(results) + self.assertEqual( + [post['posted_by'] for post in all_posts], + ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] + ) + self.assertTrue(results._fetches >= 3) + + def test_query_with_reverse(self): + posts = Table.create('more-posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. + test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the default order (ascending). 
+ results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit ascending order. + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=False + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit descending order. + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=True + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T15:22:22', + '2013-12-24T14:25:33', + '2013-12-24T14:15:14', + '2013-12-24T13:45:30', + '2013-12-24T12:35:40', + '2013-12-24T12:30:54', + ] + ) + + # Test the old, broken style. 
+ results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T15:22:22', + '2013-12-24T14:25:33', + '2013-12-24T14:15:14', + '2013-12-24T13:45:30', + '2013-12-24T12:35:40', + '2013-12-24T12:30:54', + ] + ) + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=True + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) diff -Nru python-boto-2.20.1/tests/integration/ec2/autoscale/test_connection.py python-boto-2.29.1/tests/integration/ec2/autoscale/test_connection.py --- python-boto-2.20.1/tests/integration/ec2/autoscale/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/ec2/autoscale/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -53,47 +53,47 @@ groups = c.get_all_groups() for group in groups: - self.assertTrue(type(group), AutoScalingGroup) + self.assertIsInstance(group, AutoScalingGroup) # get activities activities = group.get_activities() for activity in activities: - self.assertEqual(type(activity), Activity) + self.assertIsInstance(activity, Activity) # get launch configs configs = c.get_all_launch_configurations() for config in configs: - self.assertTrue(type(config), LaunchConfiguration) + self.assertIsInstance(config, LaunchConfiguration) # get policies policies = c.get_all_policies() for policy in policies: - self.assertTrue(type(policy), ScalingPolicy) + self.assertIsInstance(policy, ScalingPolicy) # get scheduled actions actions = c.get_all_scheduled_actions() for action in actions: - self.assertTrue(type(action), ScheduledUpdateGroupAction) + self.assertIsInstance(action, ScheduledUpdateGroupAction) # get instances instances = 
c.get_all_autoscaling_instances() for instance in instances: - self.assertTrue(type(instance), Instance) + self.assertIsInstance(instance, Instance) # get all scaling process types ptypes = c.get_all_scaling_process_types() for ptype in ptypes: - self.assertTrue(type(ptype), ProcessType) + self.assertTrue(ptype, ProcessType) # get adjustment types adjustments = c.get_all_adjustment_types() for adjustment in adjustments: - self.assertTrue(type(adjustment), AdjustmentType) + self.assertIsInstance(adjustment, AdjustmentType) # get metrics collection types types = c.get_all_metric_collection_types() - self.assertTrue(type(types), MetricCollectionTypes) + self.assertIsInstance(types, MetricCollectionTypes) # create the simplest possible AutoScale group # first create the launch configuration diff -Nru python-boto-2.20.1/tests/integration/ec2/elb/test_connection.py python-boto-2.29.1/tests/integration/ec2/elb/test_connection.py --- python-boto-2.20.1/tests/integration/ec2/elb/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/ec2/elb/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -24,6 +24,8 @@ Initial, and very limited, unit tests for ELBConnection. """ +import boto +import time import unittest from boto.ec2.elb import ELBConnection @@ -39,6 +41,19 @@ self.listeners = [(80, 8000, 'HTTP')] self.balancer = self.conn.create_load_balancer(self.name, self.availability_zones, self.listeners) + # S3 bucket for log tests + self.s3 = boto.connect_s3() + self.timestamp = str(int(time.time())) + self.bucket_name = 'boto-elb-%s' % self.timestamp + self.bucket = self.s3.create_bucket(self.bucket_name) + self.bucket.set_canned_acl('public-read-write') + self.addCleanup(self.cleanup_bucket, self.bucket) + + def cleanup_bucket(self, bucket): + for key in bucket.get_all_keys(): + key.delete() + bucket.delete() + def tearDown(self): """ Deletes the test load balancer after every test. 
It does not delete EVERY load balancer in your account""" @@ -161,5 +176,95 @@ sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners) ) + def test_load_balancer_access_log(self): + attributes = self.balancer.get_attributes() + + self.assertEqual(False, attributes.access_log.enabled) + + attributes.access_log.enabled = True + attributes.access_log.s3_bucket_name = self.bucket_name + attributes.access_log.s3_bucket_prefix = 'access-logs' + attributes.access_log.emit_interval = 5 + + self.conn.modify_lb_attribute(self.balancer.name, 'accessLog', + attributes.access_log) + + new_attributes = self.balancer.get_attributes() + + self.assertEqual(True, new_attributes.access_log.enabled) + self.assertEqual(self.bucket_name, + new_attributes.access_log.s3_bucket_name) + self.assertEqual('access-logs', + new_attributes.access_log.s3_bucket_prefix) + self.assertEqual(5, new_attributes.access_log.emit_interval) + + def test_load_balancer_get_attributes(self): + attributes = self.balancer.get_attributes() + connection_draining = self.conn.get_lb_attribute(self.balancer.name, + 'ConnectionDraining') + self.assertEqual(connection_draining.enabled, + attributes.connection_draining.enabled) + self.assertEqual(connection_draining.timeout, + attributes.connection_draining.timeout) + + access_log = self.conn.get_lb_attribute(self.balancer.name, + 'AccessLog') + self.assertEqual(access_log.enabled, attributes.access_log.enabled) + self.assertEqual(access_log.s3_bucket_name, attributes.access_log.s3_bucket_name) + self.assertEqual(access_log.s3_bucket_prefix, attributes.access_log.s3_bucket_prefix) + self.assertEqual(access_log.emit_interval, attributes.access_log.emit_interval) + + cross_zone_load_balancing = self.conn.get_lb_attribute(self.balancer.name, + 'CrossZoneLoadBalancing') + self.assertEqual(cross_zone_load_balancing, + attributes.cross_zone_load_balancing.enabled) + + def change_and_verify_load_balancer_connection_draining(self, enabled, timeout = None): + attributes = 
self.balancer.get_attributes() + + attributes.connection_draining.enabled = enabled + if timeout != None: + attributes.connection_draining.timeout = timeout + + self.conn.modify_lb_attribute(self.balancer.name, + 'ConnectionDraining', attributes.connection_draining) + + attributes = self.balancer.get_attributes() + self.assertEqual(enabled, attributes.connection_draining.enabled) + if timeout != None: + self.assertEqual(timeout, attributes.connection_draining.timeout) + + def test_load_balancer_connection_draining_config(self): + self.change_and_verify_load_balancer_connection_draining(True, 128) + self.change_and_verify_load_balancer_connection_draining(True, 256) + self.change_and_verify_load_balancer_connection_draining(False) + self.change_and_verify_load_balancer_connection_draining(True, 64) + + def test_set_load_balancer_policies_of_listeners(self): + more_listeners = [(443, 8001, 'HTTP')] + self.conn.create_load_balancer_listeners(self.name, more_listeners) + + lb_policy_name = 'lb-policy' + self.conn.create_lb_cookie_stickiness_policy( + 1000, + self.name, + lb_policy_name + ) + self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + lb_policy_name + ) + + # Try to remove the policy by passing empty list. + # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html + # documents this as the way to remove policies. 
+ self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + [] + ) + + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/integration/ec2/vpc/test_connection.py python-boto-2.29.1/tests/integration/ec2/vpc/test_connection.py --- python-boto-2.20.1/tests/integration/ec2/vpc/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/ec2/vpc/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -30,30 +30,64 @@ class TestVPCConnection(unittest.TestCase): + def setUp(self): + # Registry of instances to be removed + self.instances = [] + # Registry for cleaning up the vpc after all instances are terminated + # in the format [ ( func, (arg1, ... argn) ) ] + self.post_terminate_cleanups = [] + self.api = boto.connect_vpc() - vpc = self.api.create_vpc('10.0.0.0/16') - self.addCleanup(self.api.delete_vpc, vpc.id) + self.vpc = self.api.create_vpc('10.0.0.0/16') # Need time for the VPC to be in place. :/ time.sleep(5) - self.subnet = self.api.create_subnet(vpc.id, '10.0.0.0/24') - self.addCleanup(self.api.delete_subnet, self.subnet.id) + self.subnet = self.api.create_subnet(self.vpc.id, '10.0.0.0/24') + # Register the subnet to be deleted after instance termination + self.post_terminate_cleanups.append((self.api.delete_subnet, (self.subnet.id,))) # Need time for the subnet to be in place. time.sleep(10) + def post_terminate_cleanup(self): + """Helper to run clean up tasks after instances are removed.""" + for fn, args in self.post_terminate_cleanups: + fn(*args) + # Give things time to catch up each time + time.sleep(10) + + # Now finally delete the vpc + if self.vpc: + self.api.delete_vpc(self.vpc.id) + + def terminate_instances(self): + """Helper to remove all instances and kick off additional cleanup + once they are terminated. 
+ """ + for instance in self.instances: + self.terminate_instance(instance) + self.post_terminate_cleanup() + def terminate_instance(self, instance): instance.terminate() for i in xrange(300): instance.update() if instance.state == 'terminated': # Give it a litle more time to settle. - time.sleep(10) + time.sleep(30) return else: time.sleep(10) + def delete_elastic_ip(self, eip): + # Fetch a new copy of the eip so we're up to date + new_eip = self.api.get_all_addresses([eip.public_ip])[0] + if new_eip.association_id: + new_eip.disassociate() + new_eip.release() + time.sleep(10) + def test_multi_ip_create(self): interface = NetworkInterfaceSpecification( device_index=0, subnet_id=self.subnet.id, @@ -111,7 +145,8 @@ network_interfaces=interfaces ) instance = reservation.instances[0] - self.addCleanup(self.terminate_instance, instance) + self.instances.append(instance) + self.addCleanup(self.terminate_instances) # Give it a **LONG** time to start up. # Because the public IP won't be there right away. @@ -135,6 +170,43 @@ # resembles an IP (& isn't empty/``None``)... self.assertTrue(interface.publicIp.count('.') >= 3) + def test_associate_elastic_ip(self): + interface = NetworkInterfaceSpecification( + associate_public_ip_address=False, + subnet_id=self.subnet.id, + # Just for testing. 
+ delete_on_termination=True + ) + interfaces = NetworkInterfaceCollection(interface) + + reservation = self.api.run_instances( + image_id='ami-a0cd60c9', + instance_type='m1.small', + network_interfaces=interfaces + ) + instance = reservation.instances[0] + # Register instance to be removed + self.instances.append(instance) + # Add terminate instances helper as cleanup command + self.addCleanup(self.terminate_instances) + + # Create an internet gateway so we can attach an eip + igw = self.api.create_internet_gateway() + # Wait on gateway before attaching + time.sleep(5) + # Attach and register clean up tasks + self.api.attach_internet_gateway(igw.id, self.vpc.id) + self.post_terminate_cleanups.append((self.api.detach_internet_gateway, (igw.id, self.vpc.id))) + self.post_terminate_cleanups.append((self.api.delete_internet_gateway, (igw.id,))) + + # Allocate an elastic ip to this vpc + eip = self.api.allocate_address('vpc') + self.post_terminate_cleanups.append((self.delete_elastic_ip, (eip,))) + + # Wait on instance and eip then try to associate directly to instance + time.sleep(60) + eip.associate(instance.id) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/integration/gs/test_basic.py python-boto-2.29.1/tests/integration/gs/test_basic.py --- python-boto-2.20.1/tests/integration/gs/test_basic.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/gs/test_basic.py 2014-05-30 20:49:34.000000000 +0000 @@ -31,6 +31,7 @@ import os import re import StringIO +import urllib import xml.sax from boto import handler @@ -99,6 +100,11 @@ # check to make sure content read from gcs is identical to original self.assertEqual(s1, fp.read()) fp.close() + # Use generate_url to get the contents + url = self._conn.generate_url(900, 'GET', bucket=bucket.name, key=key_name) + f = urllib.urlopen(url) + self.assertEqual(s1, f.read()) + f.close() # check to make sure set_contents_from_file is working sfp = StringIO.StringIO('foo') 
k.set_contents_from_file(sfp) diff -Nru python-boto-2.20.1/tests/integration/__init__.py python-boto-2.29.1/tests/integration/__init__.py --- python-boto-2.20.1/tests/integration/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -42,21 +42,21 @@ self.assertTrue(len(self.regions) > 0) for region in self.regions: + special_access_required = False + + for snippet in ('gov', 'cn-'): + if snippet in region.name: + special_access_required = True + break + try: c = region.connect() self.sample_service_call(c) - except (socket.gaierror, httplib.BadStatusLine): + except: # This is bad (because the SSL cert failed). Re-raise the # exception. - raise - except: - if 'gov' in region.name: - # Ignore it. GovCloud accounts require special permission - # to use. - continue - - # Anything else is bad. Re-raise. - raise + if not special_access_required: + raise def sample_service_call(self, conn): """ diff -Nru python-boto-2.20.1/tests/integration/kinesis/test_cert_verification.py python-boto-2.29.1/tests/integration/kinesis/test_cert_verification.py --- python-boto-2.20.1/tests/integration/kinesis/test_cert_verification.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/kinesis/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.kinesis + + +class KinesisCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + kinesis = True + regions = boto.kinesis.regions() + + def sample_service_call(self, conn): + conn.list_streams() diff -Nru python-boto-2.20.1/tests/integration/kinesis/test_kinesis.py python-boto-2.29.1/tests/integration/kinesis/test_kinesis.py --- python-boto-2.20.1/tests/integration/kinesis/test_kinesis.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/kinesis/test_kinesis.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,9 +20,11 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-import boto import time +import boto +from boto.kinesis.exceptions import ResourceNotFoundException + from unittest import TestCase @@ -34,15 +36,12 @@ def setUp(self): self.kinesis = boto.connect_kinesis() - def tearDown(self): - # Delete the stream even if there is a failure - self.kinesis.delete_stream('test') - def test_kinesis(self): kinesis = self.kinesis # Create a new stream kinesis.create_stream('test', 1) + self.addCleanup(self.kinesis.delete_stream, 'test') # Wait for the stream to be ready tries = 0 @@ -70,7 +69,7 @@ while tries < 100: tries += 1 time.sleep(1) - + response = kinesis.get_records(shard_iterator) shard_iterator = response['NextShardIterator'] @@ -82,3 +81,11 @@ # Read the data, which should be the same as what we wrote self.assertEqual(1, len(response['Records'])) self.assertEqual(data, response['Records'][0]['Data']) + + def test_describe_non_existent_stream(self): + with self.assertRaises(ResourceNotFoundException) as cm: + self.kinesis.describe_stream('this-stream-shouldnt-exist') + + # Assert things about the data we passed along. 
+ self.assertEqual(cm.exception.error_code, None) + self.assertTrue('not found' in cm.exception.message) diff -Nru python-boto-2.20.1/tests/integration/mws/test.py python-boto-2.29.1/tests/integration/mws/test.py --- python-boto-2.20.1/tests/integration/mws/test.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/mws/test.py 2014-05-30 20:49:34.000000000 +0000 @@ -71,8 +71,9 @@ response = self.mws.get_product_categories_for_asin( MarketplaceId=self.marketplace_id, ASIN=asin) - result = response._result - self.assertTrue(int(result.Self.ProductCategoryId) == 21) + self.assertEqual(len(response._result.Self), 3) + categoryids = [x.ProductCategoryId for x in response._result.Self] + self.assertSequenceEqual(categoryids, ['285856', '21', '491314']) @unittest.skipUnless(simple and isolator, "skipping simple test") def test_list_matching_products(self): diff -Nru python-boto-2.20.1/tests/integration/opsworks/test_layer1.py python-boto-2.29.1/tests/integration/opsworks/test_layer1.py --- python-boto-2.20.1/tests/integration/opsworks/test_layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/opsworks/test_layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,13 +20,15 @@ # IN THE SOFTWARE. 
# import unittest -import time +from boto.exception import JSONResponseError +from boto.opsworks import connect_to_region, regions, RegionInfo from boto.opsworks.layer1 import OpsWorksConnection -from boto.opsworks.exceptions import ValidationException class TestOpsWorksConnection(unittest.TestCase): + opsworks = True + def setUp(self): self.api = OpsWorksConnection() @@ -35,6 +37,18 @@ self.assertIn('Stacks', response) def test_validation_errors(self): - with self.assertRaises(ValidationException): + with self.assertRaises(JSONResponseError): self.api.create_stack('testbotostack', 'us-east-1', 'badarn', 'badarn2') + + +class TestOpsWorksHelpers(unittest.TestCase): + opsworks = True + + def test_regions(self): + response = regions() + self.assertIsInstance(response[0], RegionInfo) + + def test_connect_to_region(self): + connection = connect_to_region('us-east-1') + self.assertIsInstance(connection, OpsWorksConnection) diff -Nru python-boto-2.20.1/tests/integration/rds2/__init__.py python-boto-2.29.1/tests/integration/rds2/__init__.py --- python-boto-2.20.1/tests/integration/rds2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/rds2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff -Nru python-boto-2.20.1/tests/integration/rds2/test_cert_verification.py python-boto-2.29.1/tests/integration/rds2/test_cert_verification.py --- python-boto-2.20.1/tests/integration/rds2/test_cert_verification.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/rds2/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.rds2 + + +class RDSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + rds = True + regions = boto.rds2.regions() + + def sample_service_call(self, conn): + conn.describe_db_instances() diff -Nru python-boto-2.20.1/tests/integration/rds2/test_connection.py python-boto-2.29.1/tests/integration/rds2/test_connection.py --- python-boto-2.20.1/tests/integration/rds2/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/rds2/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import unittest +import time +from boto.rds2.layer1 import RDSConnection + + +class TestRDS2Connection(unittest.TestCase): + rds = True + + def setUp(self): + self.conn = RDSConnection() + self.db_name = "test-db-%s" % str(int(time.time())) + + def test_connect_rds(self): + # Upon release, this did not function correct. Ensure that + # args are passed correctly. + import boto + conn = boto.connect_rds2() + + def test_integration(self): + resp = self.conn.create_db_instance( + db_instance_identifier=self.db_name, + allocated_storage=5, + db_instance_class='db.t1.micro', + engine='postgres', + master_username='bototestuser', + master_user_password='testtestt3st', + # Try to limit the impact & test options. + multi_az=False, + backup_retention_period=0 + ) + self.addCleanup( + self.conn.delete_db_instance, + self.db_name, + skip_final_snapshot=True + ) + + # Wait for 6 minutes for it to come up. + time.sleep(60 * 6) + + instances = self.conn.describe_db_instances(self.db_name) + inst = instances['DescribeDBInstancesResponse']\ + ['DescribeDBInstancesResult']['DBInstances'][0] + self.assertEqual(inst['DBInstanceStatus'], 'available') + self.assertEqual(inst['Engine'], 'postgres') + self.assertEqual(inst['AllocatedStorage'], 5) + + # Try renaming it. + resp = self.conn.modify_db_instance( + self.db_name, + allocated_storage=10, + apply_immediately=True + ) + + # Give it a chance to start modifying... 
+ time.sleep(60) + + instances = self.conn.describe_db_instances(self.db_name) + inst = instances['DescribeDBInstancesResponse']\ + ['DescribeDBInstancesResult']['DBInstances'][0] + self.assertEqual(inst['DBInstanceStatus'], 'modifying') + self.assertEqual(inst['Engine'], 'postgres') + + # ...then finish the remainder of 10 minutes for the change. + time.sleep(60 * 9) + + instances = self.conn.describe_db_instances(self.db_name) + inst = instances['DescribeDBInstancesResponse']\ + ['DescribeDBInstancesResult']['DBInstances'][0] + self.assertEqual(inst['DBInstanceStatus'], 'available') + self.assertEqual(inst['Engine'], 'postgres') + self.assertEqual(inst['AllocatedStorage'], 10) diff -Nru python-boto-2.20.1/tests/integration/route53/__init__.py python-boto-2.29.1/tests/integration/route53/__init__.py --- python-boto-2.20.1/tests/integration/route53/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Tellybug, Matt Millar # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -18,3 +19,20 @@ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
+ +import time +import unittest +from nose.plugins.attrib import attr +from boto.route53.connection import Route53Connection + +@attr(route53=True) +class Route53TestCase(unittest.TestCase): + def setUp(self): + super(Route53TestCase, self).setUp() + self.conn = Route53Connection() + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + self.zone = self.conn.create_zone(self.base_domain) + + def tearDown(self): + self.zone.delete() + super(Route53TestCase, self).tearDown() diff -Nru python-boto-2.20.1/tests/integration/route53/test_alias_resourcerecordsets.py python-boto-2.29.1/tests/integration/route53/test_alias_resourcerecordsets.py --- python-boto-2.20.1/tests/integration/route53/test_alias_resourcerecordsets.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/test_alias_resourcerecordsets.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,116 @@ +# Copyright (c) 2014 Netflix, Inc. Stefan Praszalowicz +# Copyright (c) 2014 42Lines, Inc. Jim Browne +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import time +import unittest +from boto.route53.connection import Route53Connection +from boto.route53.record import ResourceRecordSets +from boto.route53.exception import DNSServerError + + +class TestRoute53AliasResourceRecordSets(unittest.TestCase): + route53 = True + + def setUp(self): + super(TestRoute53AliasResourceRecordSets, self).setUp() + self.conn = Route53Connection() + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + self.zone = self.conn.create_zone(self.base_domain) + # a standard record to use as the target for our alias + self.zone.add_a('target.%s' % self.base_domain, '102.11.23.1') + + def tearDown(self): + self.zone.delete_a('target.%s' % self.base_domain) + self.zone.delete() + super(TestRoute53AliasResourceRecordSets, self).tearDown() + + def test_incomplete_add_alias_failure(self): + base_record = dict(name="alias.%s." % self.base_domain, + type="A", + alias_dns_name="target.%s" % self.base_domain, + alias_hosted_zone_id=self.zone.id, + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="UPSERT", **base_record) + + try: + self.assertRaises(DNSServerError, rrs.commit) + except: + # if the call somehow goes through, delete our unexpected new record before failing test + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="DELETE", **base_record) + rrs.commit() + raise + + def test_add_alias(self): + base_record = dict(name="alias.%s." 
% self.base_domain, + type="A", + alias_evaluate_target_health=False, + alias_dns_name="target.%s" % self.base_domain, + alias_hosted_zone_id=self.zone.id, + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="UPSERT", **base_record) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="DELETE", **base_record) + rrs.commit() + + + def test_set_alias(self): + base_record = dict(name="alias.%s." % self.base_domain, + type="A", + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + new = rrs.add_change(action="UPSERT", **base_record) + new.set_alias(self.zone.id, "target.%s" % self.base_domain, False) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + delete = rrs.add_change(action="DELETE", **base_record) + delete.set_alias(self.zone.id, "target.%s" % self.base_domain, False) + rrs.commit() + + + def test_set_alias_backwards_compatability(self): + base_record = dict(name="alias.%s." 
% self.base_domain, + type="A", + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + new = rrs.add_change(action="UPSERT", **base_record) + new.set_alias(self.zone.id, "target.%s" % self.base_domain) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + delete = rrs.add_change(action="DELETE", **base_record) + delete.set_alias(self.zone.id, "target.%s" % self.base_domain) + rrs.commit() + + +if __name__ == '__main__': + unittest.main() diff -Nru python-boto-2.20.1/tests/integration/route53/test_cert_verification.py python-boto-2.29.1/tests/integration/route53/test_cert_verification.py --- python-boto-2.20.1/tests/integration/route53/test_cert_verification.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/test_cert_verification.py 2014-05-30 20:49:34.000000000 +0000 @@ -25,12 +25,14 @@ Check that all of the certs on all service endpoints validate. """ import unittest +from nose.plugins.attrib import attr from tests.integration import ServiceCertVerificationTest import boto.route53 +@attr(route53=True) class Route53CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): route53 = True regions = boto.route53.regions() diff -Nru python-boto-2.20.1/tests/integration/route53/test_health_check.py python-boto-2.29.1/tests/integration/route53/test_health_check.py --- python-boto-2.20.1/tests/integration/route53/test_health_check.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/test_health_check.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,164 @@ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or 
sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.integration.route53 import Route53TestCase + +from boto.route53.healthcheck import HealthCheck +from boto.route53.record import ResourceRecordSets + +class TestRoute53HealthCheck(Route53TestCase): + def test_create_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_https_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS", resource_path="/testing") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 
'HTTPS') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + + def test_create_and_list_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result1 = self.conn.create_health_check(hc) + hc = HealthCheck(ip_addr="54.217.7.119", port=80, hc_type="HTTP", resource_path="/testing") + result2 = self.conn.create_health_check(hc) + result = self.conn.get_list_health_checks() + self.assertTrue(len(result['ListHealthChecksResponse']['HealthChecks']) > 1) + self.conn.delete_health_check(result1['CreateHealthCheckResponse']['HealthCheck']['Id']) + self.conn.delete_health_check(result2['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_delete_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = self.conn.create_health_check(hc) + hc_id = result['CreateHealthCheckResponse']['HealthCheck']['Id'] + result = self.conn.get_list_health_checks() + found = False + for hc in result['ListHealthChecksResponse']['HealthChecks']: + if hc['Id'] == hc_id: + found = True + break + self.assertTrue(found) + result = self.conn.delete_health_check(hc_id) + result = self.conn.get_list_health_checks() + for hc in result['ListHealthChecksResponse']['HealthChecks']: + self.assertFalse(hc['Id'] == hc_id) + + def test_create_health_check_string_match(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP_STR_MATCH", resource_path="/testing", string_match="test") + result = self.conn.create_health_check(hc) + 
self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP_STR_MATCH') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_health_check_https_string_match(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS_STR_MATCH", resource_path="/testing", string_match="test") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS_STR_MATCH') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_resource_record_set(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = self.conn.create_health_check(hc) + records = ResourceRecordSets( + connection=self.conn, hosted_zone_id=self.zone.id, comment='Create DNS entry for test') + change = records.add_change('CREATE', 'unittest.%s.' 
% self.base_domain, 'A', ttl=30, identifier='test', + weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id']) + change.add_value("54.217.7.118") + records.commit() + + records = ResourceRecordSets(self.conn, self.zone.id) + deleted = records.add_change('DELETE', "unittest.%s." % self.base_domain, "A", ttl=30, identifier='test', + weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id']) + deleted.add_value('54.217.7.118') + records.commit() + + def test_create_health_check_invalid_request_interval(self): + """Test that health checks cannot be created with an invalid + 'request_interval'. + + """ + with self.assertRaises(AttributeError): + HealthCheck(**self.health_check_params(request_interval=5)) + + def test_create_health_check_invalid_failure_threshold(self): + """ + Test that health checks cannot be created with an invalid + 'failure_threshold'. + """ + with self.assertRaises(AttributeError): + HealthCheck(**self.health_check_params(failure_threshold=0)) + with self.assertRaises(AttributeError): + HealthCheck(**self.health_check_params(failure_threshold=11)) + + def test_create_health_check_request_interval(self): + hc_params = self.health_check_params(request_interval=10) + hc = HealthCheck(**hc_params) + result = self.conn.create_health_check(hc) + hc_config = (result[u'CreateHealthCheckResponse'] + [u'HealthCheck'][u'HealthCheckConfig']) + self.assertEquals(hc_config[u'RequestInterval'], + unicode(hc_params['request_interval'])) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_health_check_failure_threshold(self): + hc_params = self.health_check_params(failure_threshold=1) + hc = HealthCheck(**hc_params) + result = self.conn.create_health_check(hc) + hc_config = (result[u'CreateHealthCheckResponse'] + [u'HealthCheck'][u'HealthCheckConfig']) + self.assertEquals(hc_config[u'FailureThreshold'], + unicode(hc_params['failure_threshold'])) + 
self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def health_check_params(self, **kwargs): + params = { + 'ip_addr': "54.217.7.118", + 'port': 80, + 'hc_type': 'HTTP', + 'resource_path': '/testing', + } + params.update(kwargs) + return params diff -Nru python-boto-2.20.1/tests/integration/route53/test_resourcerecordsets.py python-boto-2.29.1/tests/integration/route53/test_resourcerecordsets.py --- python-boto-2.20.1/tests/integration/route53/test_resourcerecordsets.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/test_resourcerecordsets.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,30 +20,23 @@ # IN THE SOFTWARE. # +import time import unittest +from tests.integration.route53 import Route53TestCase + from boto.route53.connection import Route53Connection from boto.route53.record import ResourceRecordSets - -class TestRoute53ResourceRecordSets(unittest.TestCase): - def setUp(self): - super(TestRoute53ResourceRecordSets, self).setUp() - self.conn = Route53Connection() - self.zone = self.conn.create_zone('example.com') - - def tearDown(self): - self.zone.delete() - super(TestRoute53ResourceRecordSets, self).tearDown() - +class TestRoute53ResourceRecordSets(Route53TestCase): def test_add_change(self): rrs = ResourceRecordSets(self.conn, self.zone.id) - created = rrs.add_change("CREATE", "vpn.example.com.", "A") + created = rrs.add_change("CREATE", "vpn.%s." % self.base_domain, "A") created.add_value('192.168.0.25') rrs.commit() rrs = ResourceRecordSets(self.conn, self.zone.id) - deleted = rrs.add_change('DELETE', "vpn.example.com.", "A") + deleted = rrs.add_change('DELETE', "vpn.%s." % self.base_domain, "A") deleted.add_value('192.168.0.25') rrs.commit() @@ -52,7 +45,7 @@ hosts = 101 for hostid in range(hosts): - rec = "test" + str(hostid) + ".example.com" + rec = "test" + str(hostid) + ".%s" % self.base_domain created = rrs.add_change("CREATE", rec, "A") ip = '192.168.0.' 
+ str(hostid) created.add_value(ip) @@ -79,7 +72,7 @@ # Cleanup indivual records rrs = ResourceRecordSets(self.conn, self.zone.id) for hostid in range(hosts): - rec = "test" + str(hostid) + ".example.com" + rec = "test" + str(hostid) + ".%s" % self.base_domain deleted = rrs.add_change("DELETE", rec, "A") ip = '192.168.0.' + str(hostid) deleted.add_value(ip) diff -Nru python-boto-2.20.1/tests/integration/route53/test_zone.py python-boto-2.29.1/tests/integration/route53/test_zone.py --- python-boto-2.20.1/tests/integration/route53/test_zone.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/route53/test_zone.py 2014-05-30 20:49:34.000000000 +0000 @@ -22,63 +22,89 @@ # IN THE SOFTWARE. # +import time import unittest +from nose.plugins.attrib import attr from boto.route53.connection import Route53Connection from boto.exception import TooManyRecordsException +@attr(route53=True) class TestRoute53Zone(unittest.TestCase): @classmethod def setUpClass(self): route53 = Route53Connection() - zone = route53.get_zone('example.com') + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + zone = route53.get_zone(self.base_domain) if zone is not None: zone.delete() - self.zone = route53.create_zone('example.com') + self.zone = route53.create_zone(self.base_domain) def test_nameservers(self): self.zone.get_nameservers() def test_a(self): - self.zone.add_a('example.com', '102.11.23.1', 80) - record = self.zone.get_a('example.com') - self.assertEquals(record.name, u'example.com.') + self.zone.add_a(self.base_domain, '102.11.23.1', 80) + record = self.zone.get_a(self.base_domain) + self.assertEquals(record.name, u'%s.' 
% self.base_domain) self.assertEquals(record.resource_records, [u'102.11.23.1']) self.assertEquals(record.ttl, u'80') - self.zone.update_a('example.com', '186.143.32.2', '800') - record = self.zone.get_a('example.com') - self.assertEquals(record.name, u'example.com.') + self.zone.update_a(self.base_domain, '186.143.32.2', '800') + record = self.zone.get_a(self.base_domain) + self.assertEquals(record.name, u'%s.' % self.base_domain) self.assertEquals(record.resource_records, [u'186.143.32.2']) self.assertEquals(record.ttl, u'800') def test_cname(self): - self.zone.add_cname('www.example.com', 'webserver.example.com', 200) - record = self.zone.get_cname('www.example.com') - self.assertEquals(record.name, u'www.example.com.') - self.assertEquals(record.resource_records, [u'webserver.example.com.']) + self.zone.add_cname( + 'www.%s' % self.base_domain, + 'webserver.%s' % self.base_domain, + 200 + ) + record = self.zone.get_cname('www.%s' % self.base_domain) + self.assertEquals(record.name, u'www.%s.' % self.base_domain) + self.assertEquals(record.resource_records, [ + u'webserver.%s.' % self.base_domain + ]) self.assertEquals(record.ttl, u'200') - self.zone.update_cname('www.example.com', 'web.example.com', 45) - record = self.zone.get_cname('www.example.com') - self.assertEquals(record.name, u'www.example.com.') - self.assertEquals(record.resource_records, [u'web.example.com.']) + self.zone.update_cname( + 'www.%s' % self.base_domain, + 'web.%s' % self.base_domain, + 45 + ) + record = self.zone.get_cname('www.%s' % self.base_domain) + self.assertEquals(record.name, u'www.%s.' % self.base_domain) + self.assertEquals(record.resource_records, [ + u'web.%s.' 
% self.base_domain + ]) self.assertEquals(record.ttl, u'45') def test_mx(self): - self.zone.add_mx('example.com', - ['10 mx1.example.com', '20 mx2.example.com'], - 1000) - record = self.zone.get_mx('example.com') + self.zone.add_mx( + self.base_domain, + [ + '10 mx1.%s' % self.base_domain, + '20 mx2.%s' % self.base_domain, + ], + 1000 + ) + record = self.zone.get_mx(self.base_domain) self.assertEquals(set(record.resource_records), - set([u'10 mx1.example.com.', - u'20 mx2.example.com.'])) + set([u'10 mx1.%s.' % self.base_domain, + u'20 mx2.%s.' % self.base_domain])) self.assertEquals(record.ttl, u'1000') - self.zone.update_mx('example.com', - ['10 mail1.example.com', '20 mail2.example.com'], - 50) - record = self.zone.get_mx('example.com') + self.zone.update_mx( + self.base_domain, + [ + '10 mail1.%s' % self.base_domain, + '20 mail2.%s' % self.base_domain, + ], + 50 + ) + record = self.zone.get_mx(self.base_domain) self.assertEquals(set(record.resource_records), - set([u'10 mail1.example.com.', - '20 mail2.example.com.'])) + set([u'10 mail1.%s.' % self.base_domain, + '20 mail2.%s.' 
% self.base_domain])) self.assertEquals(record.ttl, u'50') def test_get_records(self): @@ -92,40 +118,48 @@ route53.get_zones() def test_identifiers_wrrs(self): - self.zone.add_a('wrr.example.com', '1.2.3.4', + self.zone.add_a('wrr.%s' % self.base_domain, '1.2.3.4', identifier=('foo', '20')) - self.zone.add_a('wrr.example.com', '5.6.7.8', + self.zone.add_a('wrr.%s' % self.base_domain, '5.6.7.8', identifier=('bar', '10')) - wrrs = self.zone.find_records('wrr.example.com', 'A', all=True) + wrrs = self.zone.find_records( + 'wrr.%s' % self.base_domain, + 'A', + all=True + ) self.assertEquals(len(wrrs), 2) - self.zone.delete_a('wrr.example.com', all=True) + self.zone.delete_a('wrr.%s' % self.base_domain, all=True) def test_identifiers_lbrs(self): - self.zone.add_a('lbr.example.com', '4.3.2.1', + self.zone.add_a('lbr.%s' % self.base_domain, '4.3.2.1', identifier=('baz', 'us-east-1')) - self.zone.add_a('lbr.example.com', '8.7.6.5', + self.zone.add_a('lbr.%s' % self.base_domain, '8.7.6.5', identifier=('bam', 'us-west-1')) - lbrs = self.zone.find_records('lbr.example.com', 'A', all=True) + lbrs = self.zone.find_records( + 'lbr.%s' % self.base_domain, + 'A', + all=True + ) self.assertEquals(len(lbrs), 2) - self.zone.delete_a('lbr.example.com', + self.zone.delete_a('lbr.%s' % self.base_domain, identifier=('bam', 'us-west-1')) - self.zone.delete_a('lbr.example.com', + self.zone.delete_a('lbr.%s' % self.base_domain, identifier=('baz', 'us-east-1')) def test_toomany_exception(self): - self.zone.add_a('exception.example.com', '4.3.2.1', + self.zone.add_a('exception.%s' % self.base_domain, '4.3.2.1', identifier=('baz', 'us-east-1')) - self.zone.add_a('exception.example.com', '8.7.6.5', + self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5', identifier=('bam', 'us-west-1')) with self.assertRaises(TooManyRecordsException): - lbrs = self.zone.get_a('exception.example.com') - self.zone.delete_a('exception.example.com', all=True) + lbrs = self.zone.get_a('exception.%s' % 
self.base_domain) + self.zone.delete_a('exception.%s' % self.base_domain, all=True) @classmethod def tearDownClass(self): - self.zone.delete_a('example.com') - self.zone.delete_cname('www.example.com') - self.zone.delete_mx('example.com') + self.zone.delete_a(self.base_domain) + self.zone.delete_cname('www.%s' % self.base_domain) + self.zone.delete_mx(self.base_domain) self.zone.delete() if __name__ == '__main__': diff -Nru python-boto-2.20.1/tests/integration/s3/test_bucket.py python-boto-2.29.1/tests/integration/s3/test_bucket.py --- python-boto-2.20.1/tests/integration/s3/test_bucket.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/s3/test_bucket.py 2014-05-30 20:49:34.000000000 +0000 @@ -34,10 +34,10 @@ from boto.s3.bucketlogging import BucketLogging from boto.s3.lifecycle import Lifecycle from boto.s3.lifecycle import Transition +from boto.s3.lifecycle import Expiration from boto.s3.lifecycle import Rule from boto.s3.acl import Grant from boto.s3.tagging import Tags, TagSet -from boto.s3.lifecycle import Lifecycle, Expiration, Transition from boto.s3.website import RedirectLocation @@ -261,3 +261,22 @@ self.assertEqual(rule.expiration.days, days) #Note: Boto seems correct? AWS seems broken? #self.assertEqual(rule.prefix, prefix) + + def test_lifecycle_with_defaults(self): + lifecycle = Lifecycle() + lifecycle.add_rule(expiration=30) + self.assertTrue(self.bucket.configure_lifecycle(lifecycle)) + response = self.bucket.get_lifecycle_config() + self.assertEqual(len(response), 1) + actual_lifecycle = response[0] + self.assertNotEqual(len(actual_lifecycle.id), 0) + self.assertEqual(actual_lifecycle.prefix, '') + + def test_lifecycle_rule_xml(self): + # create a rule directly with id, prefix defaults + rule = Rule(status='Enabled', expiration=30) + s = rule.to_xml() + # Confirm no ID is set in the rule. 
+ self.assertEqual(s.find(""), -1) + # Confirm Prefix is '' and not set to 'None' + self.assertNotEqual(s.find(""), -1) diff -Nru python-boto-2.20.1/tests/integration/s3/test_connection.py python-boto-2.29.1/tests/integration/s3/test_connection.py --- python-boto-2.20.1/tests/integration/s3/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/s3/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -15,7 +15,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -196,7 +196,7 @@ # now try to inject a response header data = k.get_contents_as_string(response_headers={'response-content-type' : 'foo/bar'}) assert k.content_type == 'foo/bar' - + # now delete all keys in bucket for k in bucket: if k.name == 'reduced_redundancy': @@ -224,6 +224,7 @@ # give bucket anon user access and anon read again auth_bucket.set_acl('public-read') + time.sleep(5) try: iter(anon_bucket.list()).next() self.fail("not expecting contents") diff -Nru python-boto-2.20.1/tests/integration/s3/test_key.py python-boto-2.29.1/tests/integration/s3/test_key.py --- python-boto-2.20.1/tests/integration/s3/test_key.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/s3/test_key.py 2014-05-30 20:49:34.000000000 +0000 @@ -401,14 +401,16 @@ key = self.bucket.new_key('test_header_encoding') key.set_metadata('Cache-control', 'public, max-age=500') + key.set_metadata('Test-Plus', u'A plus (+)') key.set_metadata('Content-disposition', u'filename=Schöne Zeit.txt') 
key.set_contents_from_string('foo') check = self.bucket.get_key('test_header_encoding') self.assertEqual(check.cache_control, 'public, max-age=500') - self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne+Zeit.txt') + self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)') + self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt') self.assertEqual( - urllib.unquote_plus(check.content_disposition).decode('utf-8'), + urllib.unquote(check.content_disposition).decode('utf-8'), 'filename=Schöne Zeit.txt'.decode('utf-8') ) diff -Nru python-boto-2.20.1/tests/integration/s3/test_multipart.py python-boto-2.29.1/tests/integration/s3/test_multipart.py --- python-boto-2.20.1/tests/integration/s3/test_multipart.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/s3/test_multipart.py 2014-05-30 20:49:34.000000000 +0000 @@ -100,6 +100,17 @@ self.assertEqual(lmpu.id, ompu.id) self.assertEqual(0, len(mpus)) + def test_get_all_multipart_uploads(self): + key1 = 'a' + key2 = 'b/c' + mpu1 = self.bucket.initiate_multipart_upload(key1) + mpu2 = self.bucket.initiate_multipart_upload(key2) + rs = self.bucket.get_all_multipart_uploads(prefix='b/', delimiter='/') + for lmpu in rs: + # only expect upload for key2 (mpu2) returned + self.assertEqual(lmpu.key_name, mpu2.key_name) + self.assertEqual(lmpu.id, mpu2.id) + def test_four_part_file(self): key_name = "k" contents = "01234567890123456789" diff -Nru python-boto-2.20.1/tests/integration/sqs/test_bigmessage.py python-boto-2.29.1/tests/integration/sqs/test_bigmessage.py --- python-boto-2.20.1/tests/integration/sqs/test_bigmessage.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/sqs/test_bigmessage.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some unit tests for the SQSConnection +""" +from __future__ import with_statement + +import time +from threading import Timer +from tests.unit import unittest +import StringIO + +import boto +from boto.sqs.bigmessage import BigMessage +from boto.exception import SQSError + + +class TestBigMessage(unittest.TestCase): + + sqs = True + + def test_1_basic(self): + c = boto.connect_sqs() + + # create a queue so we can test BigMessage + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue = c.create_queue(queue_name, timeout) + self.addCleanup(c.delete_queue, queue, True) + queue.set_message_class(BigMessage) + + # create a bucket with the same name to store the message in + s3 = boto.connect_s3() + bucket = s3.create_bucket(queue_name) + self.addCleanup(s3.delete_bucket, queue_name) + time.sleep(30) + + # now add a message + msg_body = 'This is a test of the big message' + fp = StringIO.StringIO(msg_body) + s3_url = 's3://%s' % queue_name + message = queue.new_message(fp, s3_url=s3_url) + + queue.write(message) + time.sleep(30) + + s3_object_name = message.s3_url.split('/')[-1] + + # Make sure msg body is in bucket + self.assertTrue(bucket.lookup(s3_object_name)) + + m = queue.read() + self.assertEqual(m.get_body(), msg_body) + + m.delete() + time.sleep(30) + + # Make sure msg is deleted from bucket + self.assertIsNone(bucket.lookup(s3_object_name)) diff -Nru python-boto-2.20.1/tests/integration/sts/test_session_token.py python-boto-2.29.1/tests/integration/sts/test_session_token.py --- python-boto-2.20.1/tests/integration/sts/test_session_token.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/integration/sts/test_session_token.py 2014-05-30 20:49:34.000000000 +0000 @@ -33,7 +33,7 @@ from boto.s3.connection import S3Connection -class SessionTokenTest (unittest.TestCase): +class SessionTokenTest(unittest.TestCase): sts = True def test_session_token(self): diff -Nru python-boto-2.20.1/tests/mturk/reviewable_hits.doctest 
python-boto-2.29.1/tests/mturk/reviewable_hits.doctest --- python-boto-2.20.1/tests/mturk/reviewable_hits.doctest 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/mturk/reviewable_hits.doctest 2014-05-30 20:49:34.000000000 +0000 @@ -84,10 +84,10 @@ >>> len(assignments_rs) == int(assignments_rs.NumResults) True ->>> assignments_rs.PageNumber -u'1' +>>> int(assignments_rs.PageNumber) +1 ->>> assignments_rs.TotalNumResults >= 1 +>>> int(assignments_rs.TotalNumResults) >= 1 True # should contain at least one Assignment object diff -Nru python-boto-2.20.1/tests/unit/auth/test_sigv4.py python-boto-2.29.1/tests/unit/auth/test_sigv4.py --- python-boto-2.20.1/tests/unit/auth/test_sigv4.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/auth/test_sigv4.py 2014-05-30 20:49:34.000000000 +0000 @@ -19,11 +19,18 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # +import copy +import mock from mock import Mock -from tests.unit import unittest +import os +from tests.unit import unittest, MockServiceWithConfigTestCase from boto.auth import HmacAuthV4Handler +from boto.auth import S3HmacAuthV4Handler +from boto.auth import detect_potential_s3sigv4 +from boto.auth import detect_potential_sigv4 from boto.connection import HTTPRequest +from boto.regioninfo import RegionInfo class TestSigV4Handler(unittest.TestCase): @@ -36,19 +43,29 @@ '/-/vaults/foo/archives', None, {}, {'x-amz-glacier-version': '2012-06-01'}, '') + def test_not_adding_empty_qs(self): + self.provider.security_token = None + auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', Mock(), self.provider) + req = copy.copy(self.request) + auth.add_auth(req) + self.assertEqual(req.path, '/-/vaults/foo/archives') + def test_inner_whitespace_is_collapsed(self): auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', Mock(), self.provider) self.request.headers['x-amz-archive-description'] = 'two spaces' + 
self.request.headers['x-amz-quoted-string'] = ' "a b c" ' headers = auth.headers_to_sign(self.request) self.assertEqual(headers, {'Host': 'glacier.us-east-1.amazonaws.com', 'x-amz-archive-description': 'two spaces', - 'x-amz-glacier-version': '2012-06-01'}) + 'x-amz-glacier-version': '2012-06-01', + 'x-amz-quoted-string': ' "a b c" '}) # Note the single space between the "two spaces". self.assertEqual(auth.canonical_headers(headers), 'host:glacier.us-east-1.amazonaws.com\n' 'x-amz-archive-description:two spaces\n' - 'x-amz-glacier-version:2012-06-01') + 'x-amz-glacier-version:2012-06-01\n' + 'x-amz-quoted-string:"a b c"') def test_canonical_query_string(self): auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', @@ -62,6 +79,18 @@ query_string = auth.canonical_query_string(request) self.assertEqual(query_string, 'Foo.1=aaa&Foo.10=zzz') + def test_query_string(self): + auth = HmacAuthV4Handler('sns.us-east-1.amazonaws.com', + Mock(), self.provider) + params = { + 'Message': u'We \u2665 utf-8'.encode('utf-8'), + } + request = HTTPRequest( + 'POST', 'https', 'sns.us-east-1.amazonaws.com', 443, + '/', None, params, {}, '') + query_string = auth.query_string(request) + self.assertEqual(query_string, 'Message=We%20%E2%99%A5%20utf-8') + def test_canonical_uri(self): auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', Mock(), self.provider) @@ -208,3 +237,319 @@ auth.service_name = 'sqs' scope = auth.credential_scope(self.request) self.assertEqual(scope, '20121121/us-west-2/sqs/aws4_request') + + +class TestS3HmacAuthV4Handler(unittest.TestCase): + def setUp(self): + self.provider = Mock() + self.provider.access_key = 'access_key' + self.provider.secret_key = 'secret_key' + self.provider.security_token = 'sekret_tokens' + self.request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + '/awesome-bucket/?max-keys=0', None, {}, + {}, '' + ) + self.awesome_bucket_request = HTTPRequest( + method='GET', + protocol='https', + 
host='awesome-bucket.s3-us-west-2.amazonaws.com', + port=443, + path='/', + auth_path=None, + params={ + 'max-keys': 0, + }, + headers={ + 'User-Agent': 'Boto', + 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', + 'X-AMZ-Date': '20130605T193245Z', + }, + body='' + ) + self.auth = S3HmacAuthV4Handler( + host='awesome-bucket.s3-us-west-2.amazonaws.com', + config=Mock(), + provider=self.provider, + region_name='s3-us-west-2' + ) + + def test_clean_region_name(self): + # Untouched. + cleaned = self.auth.clean_region_name('us-west-2') + self.assertEqual(cleaned, 'us-west-2') + + # Stripped of the ``s3-`` prefix. + cleaned = self.auth.clean_region_name('s3-us-west-2') + self.assertEqual(cleaned, 'us-west-2') + + # Untouched (classic). + cleaned = self.auth.clean_region_name('s3.amazonaws.com') + self.assertEqual(cleaned, 's3.amazonaws.com') + + # Untouched. + cleaned = self.auth.clean_region_name('something-s3-us-west-2') + self.assertEqual(cleaned, 'something-s3-us-west-2') + + def test_region_stripping(self): + auth = S3HmacAuthV4Handler( + host='s3-us-west-2.amazonaws.com', + config=Mock(), + provider=self.provider + ) + self.assertEqual(auth.region_name, None) + + # What we wish we got. + auth = S3HmacAuthV4Handler( + host='s3-us-west-2.amazonaws.com', + config=Mock(), + provider=self.provider, + region_name='us-west-2' + ) + self.assertEqual(auth.region_name, 'us-west-2') + + # What we actually get (i.e. ``s3-us-west-2``). + self.assertEqual(self.auth.region_name, 'us-west-2') + + def test_determine_region_name(self): + name = self.auth.determine_region_name('s3-us-west-2.amazonaws.com') + self.assertEqual(name, 'us-west-2') + + def test_canonical_uri(self): + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'x/./././x .html', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # S3 doesn't canonicalize the way other SigV4 services do. 
+ # This just urlencoded, no normalization of the path. + self.assertEqual(canonical_uri, 'x/./././x%20.html') + + def test_determine_service_name(self): + # What we wish we got. + name = self.auth.determine_service_name( + 's3.us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we actually get. + name = self.auth.determine_service_name( + 's3-us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we wish we got with virtual hosting. + name = self.auth.determine_service_name( + 'bucket.s3.us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we actually get with virtual hosting. + name = self.auth.determine_service_name( + 'bucket.s3-us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + def test_add_auth(self): + # The side-effects sideshow. + self.assertFalse('x-amz-content-sha256' in self.request.headers) + self.auth.add_auth(self.request) + self.assertTrue('x-amz-content-sha256' in self.request.headers) + the_sha = self.request.headers['x-amz-content-sha256'] + self.assertEqual( + the_sha, + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ) + + def test_host_header(self): + host = self.auth.host_header( + self.awesome_bucket_request.host, + self.awesome_bucket_request + ) + self.assertEqual(host, 'awesome-bucket.s3-us-west-2.amazonaws.com') + + def test_canonical_query_string(self): + qs = self.auth.canonical_query_string(self.awesome_bucket_request) + self.assertEqual(qs, 'max-keys=0') + + def test_correct_handling_of_plus_sign(self): + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'hello+world.txt', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # Ensure that things are properly quoted. 
+ self.assertEqual(canonical_uri, 'hello%2Bworld.txt') + + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'hello%2Bworld.txt', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # Verify double escaping hasn't occurred. + self.assertEqual(canonical_uri, 'hello%2Bworld.txt') + + def test_mangle_path_and_params(self): + request = HTTPRequest( + method='GET', + protocol='https', + host='awesome-bucket.s3-us-west-2.amazonaws.com', + port=443, + # LOOK AT THIS PATH. JUST LOOK AT IT. + path='/?delete&max-keys=0', + auth_path=None, + params={ + 'key': 'why hello there', + # This gets overwritten, to make sure back-compat is maintained. + 'max-keys': 1, + }, + headers={ + 'User-Agent': 'Boto', + 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', + 'X-AMZ-Date': '20130605T193245Z', + }, + body='' + ) + + mod_req = self.auth.mangle_path_and_params(request) + self.assertEqual(mod_req.path, '/?delete&max-keys=0') + self.assertEqual(mod_req.auth_path, '/') + self.assertEqual(mod_req.params, { + 'max-keys': '0', + 'key': 'why hello there', + 'delete': '' + }) + + def test_canonical_request(self): + expected = """GET +/ +max-keys=0 +host:awesome-bucket.s3-us-west-2.amazonaws.com +user-agent:Boto +x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +x-amz-date:20130605T193245Z + +host;user-agent;x-amz-content-sha256;x-amz-date +e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""" + + authed_req = self.auth.canonical_request(self.awesome_bucket_request) + self.assertEqual(authed_req, expected) + + # Now the way ``boto.s3`` actually sends data. 
+ request = copy.copy(self.awesome_bucket_request) + request.path = request.auth_path = '/?max-keys=0' + request.params = {} + expected = """GET +/ +max-keys=0 +host:awesome-bucket.s3-us-west-2.amazonaws.com +user-agent:Boto +x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +x-amz-date:20130605T193245Z + +host;user-agent;x-amz-content-sha256;x-amz-date +e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""" + + # Pre-mangle it. In practice, this happens as part of ``add_auth``, + # but that's a side-effect that's hard to test. + request = self.auth.mangle_path_and_params(request) + authed_req = self.auth.canonical_request(request) + self.assertEqual(authed_req, expected) + + +class FakeS3Connection(object): + def __init__(self, *args, **kwargs): + self.host = kwargs.pop('host', None) + + @detect_potential_s3sigv4 + def _required_auth_capability(self): + return ['nope'] + + def _mexe(self, *args, **kwargs): + pass + + +class FakeEC2Connection(object): + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + + @detect_potential_sigv4 + def _required_auth_capability(self): + return ['nope'] + + def _mexe(self, *args, **kwargs): + pass + + +class TestS3SigV4OptIn(MockServiceWithConfigTestCase): + connection_class = FakeS3Connection + + def test_sigv4_opt_out(self): + # Default is opt-out. + fake = FakeS3Connection(host='s3.amazonaws.com') + self.assertEqual(fake._required_auth_capability(), ['nope']) + + def test_sigv4_non_optional(self): + # Requires SigV4. + fake = FakeS3Connection(host='s3.cn-north-1.amazonaws.com.cn') + self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + + def test_sigv4_opt_in_config(self): + # Opt-in via the config. + self.config = { + 's3': { + 'use-sigv4': True, + }, + } + fake = FakeS3Connection() + self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + + def test_sigv4_opt_in_env(self): + # Opt-in via the ENV. 
+ self.environ['S3_USE_SIGV4'] = True + fake = FakeS3Connection(host='s3.amazonaws.com') + self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + + +class TestSigV4OptIn(MockServiceWithConfigTestCase): + connection_class = FakeEC2Connection + + def setUp(self): + super(TestSigV4OptIn, self).setUp() + self.standard_region = RegionInfo( + name='us-west-2', + endpoint='ec2.us-west-2.amazonaws.com' + ) + self.sigv4_region = RegionInfo( + name='cn-north-1', + endpoint='ec2.cn-north-1.amazonaws.com.cn' + ) + + def test_sigv4_opt_out(self): + # Default is opt-out. + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['nope']) + + def test_sigv4_non_optional(self): + # Requires SigV4. + fake = FakeEC2Connection(region=self.sigv4_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) + + def test_sigv4_opt_in_config(self): + # Opt-in via the config. + self.config = { + 'ec2': { + 'use-sigv4': True, + }, + } + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) + + def test_sigv4_opt_in_env(self): + # Opt-in via the ENV. 
+ self.environ['EC2_USE_SIGV4'] = True + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) diff -Nru python-boto-2.20.1/tests/unit/beanstalk/test_layer1.py python-boto-2.29.1/tests/unit/beanstalk/test_layer1.py --- python-boto-2.20.1/tests/unit/beanstalk/test_layer1.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/beanstalk/test_layer1.py 2014-05-30 20:49:34.000000000 +0000 @@ -117,3 +117,33 @@ 'OptionSettings.member.2.OptionName': 'ENVVAR', 'OptionSettings.member.2.Value': 'VALUE1', }) + + def test_create_environment_with_tier(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_environment( + 'application1', 'environment1', 'version1', + '32bit Amazon Linux running Tomcat 7', + option_settings=[ + ('aws:autoscaling:launchconfiguration', 'Ec2KeyName', + 'mykeypair'), + ('aws:elasticbeanstalk:application:environment', 'ENVVAR', + 'VALUE1')], + tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0') + self.assert_request_parameters({ + 'Action': 'CreateEnvironment', + 'ApplicationName': 'application1', + 'EnvironmentName': 'environment1', + 'TemplateName': '32bit Amazon Linux running Tomcat 7', + 'ContentType': 'JSON', + 'Version': '2010-12-01', + 'VersionLabel': 'version1', + 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration', + 'OptionSettings.member.1.OptionName': 'Ec2KeyName', + 'OptionSettings.member.1.Value': 'mykeypair', + 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment', + 'OptionSettings.member.2.OptionName': 'ENVVAR', + 'OptionSettings.member.2.Value': 'VALUE1', + 'Tier.Name': 'Worker', + 'Tier.Type': 'SQS/HTTP', + 'Tier.Version': '1.0', + }) diff -Nru python-boto-2.20.1/tests/unit/cloudformation/test_connection.py python-boto-2.29.1/tests/unit/cloudformation/test_connection.py --- python-boto-2.20.1/tests/unit/cloudformation/test_connection.py 2013-12-13 
20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudformation/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -11,6 +11,7 @@ from tests.unit import AWSMockServiceTestCase from boto.cloudformation.connection import CloudFormationConnection +from boto.exception import BotoServerError SAMPLE_TEMPLATE = r""" @@ -108,12 +109,24 @@ def test_create_stack_fails(self): self.set_http_response(status_code=400, reason='Bad Request', - body='Invalid arg.') - with self.assertRaises(self.service_connection.ResponseError): + body='{"Error": {"Code": 1, "Message": "Invalid arg."}}') + with self.assertRaisesRegexp(self.service_connection.ResponseError, + 'Invalid arg.'): api_response = self.service_connection.create_stack( 'stack_name', template_body=SAMPLE_TEMPLATE, parameters=[('KeyName', 'myKeyName')]) + def test_create_stack_fail_error(self): + self.set_http_response(status_code=400, reason='Bad Request', + body='{"RequestId": "abc", "Error": {"Code": 1, "Message": "Invalid arg."}}') + try: + api_response = self.service_connection.create_stack( + 'stack_name', template_body=SAMPLE_TEMPLATE, + parameters=[('KeyName', 'myKeyName')]) + except BotoServerError, e: + self.assertEqual('abc', e.request_id) + self.assertEqual(1, e.error_code) + self.assertEqual('Invalid arg.', e.message) class TestCloudFormationUpdateStack(CloudFormationConnectionBase): def default_body(self): @@ -569,6 +582,10 @@ EC2 KeyPair + Reason + + CAPABILITY_IAM + 0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91 @@ -593,6 +610,11 @@ self.assertEqual(param2.no_echo, True) self.assertEqual(param2.parameter_key, 'KeyName') + self.assertEqual(template.capabilities_reason, 'Reason') + + self.assertEqual(len(template.capabilities), 1) + self.assertEqual(template.capabilities[0].value, 'CAPABILITY_IAM') + self.assert_request_parameters({ 'Action': 'ValidateTemplate', 'TemplateBody': SAMPLE_TEMPLATE, @@ -615,6 +637,82 @@ 'Version': '2010-05-15', }) + +class 
TestCloudFormationEstimateTemplateCost(CloudFormationConnectionBase): + def default_body(self): + return """ + { + "EstimateTemplateCostResponse": { + "EstimateTemplateCostResult": { + "Url": "http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6" + } + } + } + """ + + def test_estimate_template_cost(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.estimate_template_cost( + template_body='{}') + self.assertEqual(api_response, + 'http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6') + self.assert_request_parameters({ + 'Action': 'EstimateTemplateCost', + 'ContentType': 'JSON', + 'TemplateBody': '{}', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationGetStackPolicy(CloudFormationConnectionBase): + def default_body(self): + return """ + { + "GetStackPolicyResponse": { + "GetStackPolicyResult": { + "StackPolicyBody": "{...}" + } + } + } + """ + + def test_get_stack_policy(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_stack_policy('stack-id') + self.assertEqual(api_response, '{...}') + self.assert_request_parameters({ + 'Action': 'GetStackPolicy', + 'ContentType': 'JSON', + 'StackName': 'stack-id', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase): + def default_body(self): + return """ + { + "SetStackPolicyResponse": { + "SetStackPolicyResult": { + "Some": "content" + } + } + } + """ + + def test_set_stack_policy(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.set_stack_policy('stack-id', + stack_policy_body='{}') + self.assertEqual(api_response['Some'], 'content') + self.assert_request_parameters({ + 'Action': 'SetStackPolicy', + 'ContentType': 'JSON', + 'StackName': 'stack-id', + 'StackPolicyBody': '{}', + 'Version': '2010-05-15', + }) + if __name__ == '__main__': unittest.main() diff -Nru 
python-boto-2.20.1/tests/unit/cloudformation/test_stack.py python-boto-2.29.1/tests/unit/cloudformation/test_stack.py --- python-boto-2.20.1/tests/unit/cloudformation/test_stack.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudformation/test_stack.py 2014-05-30 20:49:34.000000000 +0000 @@ -201,7 +201,7 @@ datetime.datetime(2011, 3, 10, 16, 20, 51, 575757) ) - def test_list_stacks_time_with_millis(self): + def test_list_stacks_time_with_millis_again(self): rs = boto.resultset.ResultSet([ ('member', boto.cloudformation.stack.StackResourceSummary) ]) diff -Nru python-boto-2.20.1/tests/unit/cloudfront/test_connection.py python-boto-2.29.1/tests/unit/cloudfront/test_connection.py --- python-boto-2.20.1/tests/unit/cloudfront/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudfront/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,204 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.cloudfront import CloudFrontConnection +from boto.cloudfront.distribution import Distribution, DistributionConfig, DistributionSummary +from boto.cloudfront.origin import CustomOrigin + + +class TestCloudFrontConnection(AWSMockServiceTestCase): + connection_class = CloudFrontConnection + + def setUp(self): + super(TestCloudFrontConnection, self).setUp() + + def test_get_all_distributions(self): + body = """ + + + 100 + false + + EEEEEEEEEEEEE + InProgress + 2014-02-03T11:03:41.087Z + abcdef12345678.cloudfront.net + + example.com + 80 + 443 + http-only + + static.example.com + true + + + """ + self.set_http_response(status_code=200, body=body) + response = self.service_connection.get_all_distributions() + + self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], DistributionSummary)) + self.assertEqual(response[0].id, "EEEEEEEEEEEEE") + self.assertEqual(response[0].domain_name, 
"abcdef12345678.cloudfront.net") + self.assertEqual(response[0].status, "InProgress") + self.assertEqual(response[0].cnames, ["static.example.com"]) + self.assertEqual(response[0].enabled, True) + self.assertTrue(isinstance(response[0].origin, CustomOrigin)) + self.assertEqual(response[0].origin.dns_name, "example.com") + self.assertEqual(response[0].origin.http_port, 80) + self.assertEqual(response[0].origin.https_port, 443) + self.assertEqual(response[0].origin.origin_protocol_policy, 'http-only') + + def test_get_distribution_config(self): + body = """ + + + example.com + 80 + 443 + http-only + + 1234567890123 + static.example.com + true + + """ + + self.set_http_response(status_code=200, body=body, header={"Etag": "AABBCC"}) + response = self.service_connection.get_distribution_config('EEEEEEEEEEEEE') + + self.assertTrue(isinstance(response, DistributionConfig)) + self.assertTrue(isinstance(response.origin, CustomOrigin)) + self.assertEqual(response.origin.dns_name, "example.com") + self.assertEqual(response.origin.http_port, 80) + self.assertEqual(response.origin.https_port, 443) + self.assertEqual(response.origin.origin_protocol_policy, "http-only") + self.assertEqual(response.cnames, ["static.example.com"]) + self.assertTrue(response.enabled) + self.assertEqual(response.etag, "AABBCC") + + def test_set_distribution_config(self): + get_body = """ + + + example.com + 80 + 443 + http-only + + 1234567890123 + static.example.com + true + + """ + + put_body = """ + + EEEEEE + InProgress + 2014-02-04T10:47:53.493Z + 0 + d2000000000000.cloudfront.net + + + example.com + 80 + 443 + match-viewer + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + this is a comment + false + + + """ + + self.set_http_response(status_code=200, body=get_body, header={"Etag": "AA"}) + conf = self.service_connection.get_distribution_config('EEEEEEE') + + self.set_http_response(status_code=200, body=put_body, header={"Etag": "AABBCCD"}) + conf.comment = 'this is a comment' + response = 
self.service_connection.set_distribution_config('EEEEEEE', conf.etag, conf) + + self.assertEqual(response, "AABBCCD") + + def test_get_distribution_info(self): + body = """ + + EEEEEEEEEEEEE + InProgress + 2014-02-03T11:03:41.087Z + 0 + abcdef12345678.cloudfront.net + + + example.com + 80 + 443 + http-only + + 1111111111111 + static.example.com + true + + + """ + + self.set_http_response(status_code=200, body=body) + response = self.service_connection.get_distribution_info('EEEEEEEEEEEEE') + + self.assertTrue(isinstance(response, Distribution)) + self.assertTrue(isinstance(response.config, DistributionConfig)) + self.assertTrue(isinstance(response.config.origin, CustomOrigin)) + self.assertEqual(response.config.origin.dns_name, "example.com") + self.assertEqual(response.config.origin.http_port, 80) + self.assertEqual(response.config.origin.https_port, 443) + self.assertEqual(response.config.origin.origin_protocol_policy, "http-only") + self.assertEqual(response.config.cnames, ["static.example.com"]) + self.assertTrue(response.config.enabled) + self.assertEqual(response.id, "EEEEEEEEEEEEE") + self.assertEqual(response.status, "InProgress") + self.assertEqual(response.domain_name, "abcdef12345678.cloudfront.net") + self.assertEqual(response.in_progress_invalidation_batches, 0) + + def test_create_distribution(self): + body = """ + + EEEEEEEEEEEEEE + InProgress + 2014-02-04T10:34:07.873Z + 0 + d2000000000000.cloudfront.net + + + example.com + 80 + 443 + match-viewer + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + example.com distribution + false + + + """ + + self.set_http_response(status_code=201, body=body) + origin = CustomOrigin("example.com", origin_protocol_policy="match_viewer") + response = self.service_connection.create_distribution(origin, enabled=False, comment="example.com distribution") + + self.assertTrue(isinstance(response, Distribution)) + self.assertTrue(isinstance(response.config, DistributionConfig)) + self.assertTrue(isinstance(response.config.origin, 
CustomOrigin)) + self.assertEqual(response.config.origin.dns_name, "example.com") + self.assertEqual(response.config.origin.http_port, 80) + self.assertEqual(response.config.origin.https_port, 443) + self.assertEqual(response.config.origin.origin_protocol_policy, "match-viewer") + self.assertEqual(response.config.cnames, []) + self.assertTrue(not response.config.enabled) + self.assertEqual(response.id, "EEEEEEEEEEEEEE") + self.assertEqual(response.status, "InProgress") + self.assertEqual(response.domain_name, "d2000000000000.cloudfront.net") + self.assertEqual(response.in_progress_invalidation_batches, 0) diff -Nru python-boto-2.20.1/tests/unit/cloudsearch2/__init__.py python-boto-2.29.1/tests/unit/cloudsearch2/__init__.py --- python-boto-2.20.1/tests/unit/cloudsearch2/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudsearch2/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru python-boto-2.20.1/tests/unit/cloudsearch2/test_connection.py python-boto-2.29.1/tests/unit/cloudsearch2/test_connection.py --- python-boto-2.20.1/tests/unit/cloudsearch2/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudsearch2/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,250 @@ +#!/usr/bin env python + +from tests.unit import AWSMockServiceTestCase + +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection + + +class TestCloudSearchCreateDomain(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return """ +{ + "CreateDomainResponse": { + "CreateDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": false, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": false, + "Processing": 
false, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + } + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} +""" + + def test_create_domain(self): + self.set_http_response(status_code=200) + self.service_connection.create_domain('demo') + + self.assert_request_parameters({ + 'Action': 'CreateDomain', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_connect_result_endpoints(self): + """Check that endpoints & ARNs are correctly returned from AWS""" + + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual( + domain.doc_service_endpoint, + "doc-demo.us-east-1.cloudsearch.amazonaws.com") + self.assertEqual(domain.service_arn, + "arn:aws:cs:us-east-1:1234567890:domain/demo") + self.assertEqual( + domain.search_service_endpoint, + "search-demo.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_connect_result_statuses(self): + """Check that domain statuses are correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual(domain.created, True) + self.assertEqual(domain.processing, False) + self.assertEqual(domain.requires_index_documents, False) + self.assertEqual(domain.deleted, False) + + def test_cloudsearch_connect_result_details(self): + """Check that the domain information is correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, 
api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual(domain.id, "1234567890/demo") + self.assertEqual(domain.name, "demo") + + def test_cloudsearch_documentservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + document = domain.get_document_service() + + self.assertEqual( + document.endpoint, + "doc-demo.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_searchservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + search = domain.get_search_service() + + self.assertEqual( + search.endpoint, + "search-demo.us-east-1.cloudsearch.amazonaws.com") + + +class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return """ +{ + "DeleteDomainResponse": { + "DeleteDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "test", + "Deleted": true, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": null + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": null + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + } + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} +""" + + def test_cloudsearch_deletion(self): + """ + Check that the correct arguments are sent to AWS when creating a + cloudsearch connection. 
+ """ + self.set_http_response(status_code=200) + self.service_connection.delete_domain('demo') + + self.assert_request_parameters({ + 'Action': 'DeleteDomain', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + +class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return """ +{ + "IndexDocumentsResponse": { + "IndexDocumentsResult": { + "FieldNames": [ + "average_score", + "brand_id", + "colors", + "context", + "context_owner", + "created_at", + "creator_id", + "description", + "file_size", + "format", + "has_logo", + "has_messaging", + "height", + "image_id", + "ingested_from", + "is_advertising", + "is_photo", + "is_reviewed", + "modified_at", + "subject_date", + "tags", + "title", + "width" + ] + }, + "ResponseMetadata": { + "RequestId": "42e618d9-c4d9-11e3-8242-c32da3041159" + } + } +} +""" + + def test_cloudsearch_index_documents(self): + """ + Check that the correct arguments are sent to AWS when indexing a + domain. + """ + self.set_http_response(status_code=200) + self.service_connection.index_documents('demo') + + self.assert_request_parameters({ + 'Action': 'IndexDocuments', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_index_documents_resp(self): + """ + Check that the AWS response is being parsed correctly when indexing a + domain. 
+ """ + self.set_http_response(status_code=200) + api_response = self.service_connection.index_documents('demo') + + fields = (api_response['IndexDocumentsResponse'] + ['IndexDocumentsResult'] + ['FieldNames']) + + self.assertEqual(fields, ['average_score', 'brand_id', 'colors', + 'context', 'context_owner', + 'created_at', 'creator_id', + 'description', 'file_size', 'format', + 'has_logo', 'has_messaging', 'height', + 'image_id', 'ingested_from', + 'is_advertising', 'is_photo', + 'is_reviewed', 'modified_at', + 'subject_date', 'tags', 'title', + 'width']) diff -Nru python-boto-2.20.1/tests/unit/cloudsearch2/test_document.py python-boto-2.29.1/tests/unit/cloudsearch2/test_document.py --- python-boto-2.20.1/tests/unit/cloudsearch2/test_document.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudsearch2/test_document.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,311 @@ +#!/usr/bin env python + +from tests.unit import unittest +from httpretty import HTTPretty +from mock import MagicMock + +import urlparse +import json + +from boto.cloudsearch2.document import DocumentServiceConnection +from boto.cloudsearch2.document import CommitMismatchError, EncodingError, \ + ContentTooLongError, DocumentServiceConnection + +import boto + +class CloudSearchDocumentTest(unittest.TestCase): + def setUp(self): + HTTPretty.enable() + HTTPretty.register_uri( + HTTPretty.POST, + ("http://doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com/" + "2013-01-01/documents/batch"), + body=json.dumps(self.response), + content_type="application/json") + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchDocumentSingleTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_add_basics(self): + """ + Check that a simple add document actually sends an add document request + to AWS. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_basic(self): + """ + Check that a simple add document sends correct document metadata to + AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['id'], '1234') + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_fields(self): + """ + Check that a simple add document sends the actual document to AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['fields']['category'], ['cat_a', 'cat_b', + 'cat_c']) + self.assertEqual(args['fields']['id'], '1234') + self.assertEqual(args['fields']['title'], 'Title 1') + + def test_cloudsearch_add_single_result(self): + """ + Check that the reply from adding a single document is correctly parsed. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 1) + self.assertEqual(doc.deletes, 0) + + self.assertEqual(doc.doc_service, document) + + +class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 3, + 'deletes': 0, + } + + objs = { + '1234': { + 'fields': {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}}, + '1235': { + 'fields': {"id": "1235", "title": "Title 2", + "category": ["cat_b", "cat_c", + "cat_d"]}}, + '1236': { + 'fields': {"id": "1236", "title": "Title 3", + "category": ["cat_e", "cat_f", "cat_g"]}}, + } + + + def test_cloudsearch_add_basics(self): + """Check that multiple documents are added correctly to AWS""" + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['fields']) + document.commit() + + args = json.loads(HTTPretty.last_request.body) + + for arg in args: + self.assertTrue(arg['id'] in self.objs) + self.assertEqual(arg['fields']['id'], + self.objs[arg['id']]['fields']['id']) + self.assertEqual(arg['fields']['title'], + self.objs[arg['id']]['fields']['title']) + self.assertEqual(arg['fields']['category'], + self.objs[arg['id']]['fields']['category']) + + def test_cloudsearch_add_results(self): + """ + Check that the result from adding multiple documents is parsed + correctly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['fields']) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, len(self.objs)) + self.assertEqual(doc.deletes, 0) + + +class CloudSearchDocumentDelete(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 1, + } + + def test_cloudsearch_delete(self): + """ + Test that the request for a single document deletion is done properly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + document.commit() + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['type'], 'delete') + self.assertEqual(args['id'], '5') + + def test_cloudsearch_delete_results(self): + """ + Check that the result of a single document deletion is parsed properly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 0) + self.assertEqual(doc.deletes, 1) + + +class CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 2, + } + + def test_cloudsearch_delete_multiples(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + document.delete("6") + document.commit() + args = json.loads(HTTPretty.last_request.body) + + self.assertEqual(len(args), 2) + for arg in args: + self.assertEqual(arg['type'], 'delete') + + +class CloudSearchSDFManipulation(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + + def test_cloudsearch_initial_sdf_is_blank(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + self.assertEqual(document.get_sdf(), '[]') + + def test_cloudsearch_single_document_sdf(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertNotEqual(document.get_sdf(), '[]') + + document.clear_sdf() + + self.assertEqual(document.get_sdf(), '[]') + +class CloudSearchBadSDFTesting(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_erroneous_sdf(self): + original = boto.log.error + boto.log.error = MagicMock() + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": None, + "category": ["cat_a", "cat_b", "cat_c"]}) + + document.commit() + 
self.assertNotEqual(len(boto.log.error.call_args_list), 1) + + boto.log.error = original + + +class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Illegal Unicode character in document'}] + } + + def test_fake_bad_unicode(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + self.assertRaises(EncodingError, document.commit) + + +class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'The Content-Length is too long'}] + } + + def test_fake_docs_too_big(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(ContentTooLongError, document.commit) + + +class CloudSearchDocumentErrorMismatch(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Something went wrong'}] + } + + def test_fake_failure(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(CommitMismatchError, document.commit) diff -Nru python-boto-2.20.1/tests/unit/cloudsearch2/test_exceptions.py python-boto-2.29.1/tests/unit/cloudsearch2/test_exceptions.py --- python-boto-2.20.1/tests/unit/cloudsearch2/test_exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudsearch2/test_exceptions.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,37 @@ +import mock +from boto.compat import 
json +from tests.unit import unittest + +from .test_search import HOSTNAME, CloudSearchSearchBaseTest +from boto.cloudsearch2.search import SearchConnection, SearchServiceException + + +def fake_loads_value_error(content, *args, **kwargs): + """Callable to generate a fake ValueError""" + raise ValueError("HAHAHA! Totally not simplejson & you gave me bad JSON.") + + +def fake_loads_json_error(content, *args, **kwargs): + """Callable to generate a fake JSONDecodeError""" + raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.', + '', 0) + + +class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest): + response = '{}' + + def test_no_simplejson_value_error(self): + with mock.patch.object(json, 'loads', fake_loads_value_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') + + @unittest.skipUnless(hasattr(json, 'JSONDecodeError'), + 'requires simplejson') + def test_simplejson_jsondecodeerror(self): + with mock.patch.object(json, 'loads', fake_loads_json_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') diff -Nru python-boto-2.20.1/tests/unit/cloudsearch2/test_search.py python-boto-2.29.1/tests/unit/cloudsearch2/test_search.py --- python-boto-2.20.1/tests/unit/cloudsearch2/test_search.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/cloudsearch2/test_search.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,370 @@ +#!/usr/bin env python + +from tests.unit import unittest +from httpretty import HTTPretty + +import urlparse +import json +import mock +import requests + +from boto.cloudsearch2.search import SearchConnection, SearchServiceException + +HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com" +FULL_URL = 'http://%s/2013-01-01/search' % HOSTNAME + + +class CloudSearchSearchBaseTest(unittest.TestCase): + + hits 
= [ + { + 'id': '12341', + 'fields': { + 'title': 'Document 1', + 'rank': 1 + } + }, + { + 'id': '12342', + 'fields': { + 'title': 'Document 2', + 'rank': 2 + } + }, + { + 'id': '12343', + 'fields': { + 'title': 'Document 3', + 'rank': 3 + } + }, + { + 'id': '12344', + 'fields': { + 'title': 'Document 4', + 'rank': 4 + } + }, + { + 'id': '12345', + 'fields': { + 'title': 'Document 5', + 'rank': 5 + } + }, + { + 'id': '12346', + 'fields': { + 'title': 'Document 6', + 'rank': 6 + } + }, + { + 'id': '12347', + 'fields': { + 'title': 'Document 7', + 'rank': 7 + } + }, + ] + + content_type = "text/xml" + response_status = 200 + + def get_args(self, requestline): + (_, request, _) = requestline.split(" ") + (_, request) = request.split("?", 1) + args = urlparse.parse_qs(request) + return args + + def setUp(self): + HTTPretty.enable() + body = self.response + + if not isinstance(body, basestring): + body = json.dumps(body) + + HTTPretty.register_uri(HTTPretty.GET, FULL_URL, + body=body, + content_type=self.content_type, + status=self.response_status) + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchSearchTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr':"Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit':CloudSearchSearchBaseTest.hits + }, + 'status': { + 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + def test_cloudsearch_qsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test') + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['q'], ["Test"]) + self.assertEqual(args['start'], ["0"]) + self.assertEqual(args['size'], ["10"]) + + + def test_cloudsearch_search_details(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', size=50, start=20) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['q'], 
["Test"]) + self.assertEqual(args['size'], ["50"]) + self.assertEqual(args['start'], ["20"]) + + def test_cloudsearch_facet_constraint_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet={'author': "'John Smith','Mark Smith'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet.author'], + ["'John Smith','Mark Smith'"]) + + def test_cloudsearch_facet_constraint_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet={'author': "'John Smith','Mark Smith'", + 'category': "'News','Reviews'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet.author'], + ["'John Smith','Mark Smith'"]) + self.assertEqual(args['facet.category'], + ["'News','Reviews'"]) + + def test_cloudsearch_facet_sort_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet={'author': {'sort':'alpha'}}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + print args + + self.assertEqual(args['facet.author'], ['{"sort": "alpha"}']) + + def test_cloudsearch_facet_sort_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet={'author': {'sort': 'alpha'}, + 'cat': {'sort': 'count'}}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet.author'], ['{"sort": "alpha"}']) + self.assertEqual(args['facet.cat'], ['{"sort": "count"}']) + + def test_cloudsearch_result_fields_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['return'], ['author']) + + def test_cloudsearch_result_fields_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author', 'title']) + + args = 
self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['return'], ['author,title']) + + def test_cloudsearch_results_meta(self): + """Check returned metadata is parsed correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # These rely on the default response which is fed into HTTPretty + self.assertEqual(results.hits, 30) + self.assertEqual(results.docs[0]['fields']['rank'], 1) + + def test_cloudsearch_results_info(self): + """Check num_pages_needed is calculated correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # This relies on the default response which is fed into HTTPretty + self.assertEqual(results.num_pages_needed, 3.0) + + def test_cloudsearch_results_matched(self): + """ + Check that information objects are passed back through the API + correctly. + """ + search = SearchConnection(endpoint=HOSTNAME) + query = search.build_query(q='Test') + + results = search(query) + + self.assertEqual(results.search_service, search) + self.assertEqual(results.query, query) + + def test_cloudsearch_results_hits(self): + """Check that documents are parsed properly from AWS""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + hits = map(lambda x: x['id'], results.docs) + + # This relies on the default response which is fed into HTTPretty + self.assertEqual( + hits, ["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + + def test_cloudsearch_results_iterator(self): + """Check the results iterator""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + results_correct = iter(["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + for x in results: + self.assertEqual(x['id'], results_correct.next()) + + + def test_cloudsearch_results_internal_consistancy(self): + """Check the documents length matches the iterator details""" + search = 
SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + self.assertEqual(len(results), len(results.docs)) + + def test_cloudsearch_search_nextpage(self): + """Check next page query is correct""" + search = SearchConnection(endpoint=HOSTNAME) + query1 = search.build_query(q='Test') + query2 = search.build_query(q='Test') + + results = search(query2) + + self.assertEqual(results.next_page().query.start, + query1.start + query1.size) + self.assertEqual(query1.q, query2.q) + +class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr':"Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit':CloudSearchSearchBaseTest.hits + }, + 'status': { + 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + }, + 'facets': { + 'tags': {}, + 'animals': {'buckets': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value':'lions'}]}, + } + } + + def test_cloudsearch_search_facets(self): + #self.response['facets'] = {'tags': {}} + + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test', facet={'tags': {}}) + + self.assertTrue('tags' not in results.facets) + self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'}) + + +class CloudSearchNonJsonTest(CloudSearchSearchBaseTest): + response = '

    500 Internal Server Error

    ' + response_status = 500 + content_type = 'text/xml' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaises(SearchServiceException): + search.search(q='Test') + + +class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest): + response = '

    403 Forbidden

    foo bar baz' + response_status = 403 + content_type = 'text/html' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'): + search.search(q='Test') + + +class FakeResponse(object): + status_code = 405 + content = '' + + +class CloudSearchConnectionTest(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchConnectionTest, self).setUp() + self.conn = SearchConnection( + endpoint='test-domain.cloudsearch.amazonaws.com' + ) + + def test_expose_additional_error_info(self): + mpo = mock.patch.object + fake = FakeResponse() + fake.content = 'Nopenopenope' + + # First, in the case of a non-JSON, non-403 error. + with mpo(self.conn.session, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='not_gonna_happen') + + self.assertTrue('non-json response' in str(cm.exception)) + self.assertTrue('Nopenopenope' in str(cm.exception)) + + # Then with JSON & an 'error' key within. + fake.content = json.dumps({ + 'error': "Something went wrong. Oops." + }) + + with mpo(self.conn.session, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='no_luck_here') + + self.assertTrue('Unknown error' in str(cm.exception)) + self.assertTrue('went wrong. 
Oops' in str(cm.exception)) diff -Nru python-boto-2.20.1/tests/unit/dynamodb/test_types.py python-boto-2.29.1/tests/unit/dynamodb/test_types.py --- python-boto-2.20.1/tests/unit/dynamodb/test_types.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/dynamodb/test_types.py 2014-05-30 20:49:34.000000000 +0000 @@ -78,5 +78,17 @@ self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}), set([1.1, 2.2, 3.3])) + +class TestBinary(unittest.TestCase): + def test_bad_input(self): + with self.assertRaises(TypeError): + data = types.Binary(1) + + def test_good_input(self): + data = types.Binary(chr(1)) + + self.assertEqual('\x01', str(data)) + + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/dynamodb2/test_table.py python-boto-2.29.1/tests/unit/dynamodb2/test_table.py --- python-boto-2.20.1/tests/unit/dynamodb2/test_table.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/dynamodb2/test_table.py 2014-05-30 20:49:34.000000000 +0000 @@ -2,19 +2,21 @@ import unittest from boto.dynamodb2 import exceptions from boto.dynamodb2.fields import (HashKey, RangeKey, - AllIndex, KeysOnlyIndex, IncludeIndex) + AllIndex, KeysOnlyIndex, IncludeIndex, + GlobalAllIndex, GlobalKeysOnlyIndex, + GlobalIncludeIndex) from boto.dynamodb2.items import Item from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet from boto.dynamodb2.table import Table -from boto.dynamodb2.types import (STRING, NUMBER, +from boto.dynamodb2.types import (STRING, NUMBER, BINARY, FILTER_OPERATORS, QUERY_OPERATORS) +from boto.exception import JSONResponseError FakeDynamoDBConnection = mock.create_autospec(DynamoDBConnection) - class SchemaFieldsTestCase(unittest.TestCase): def test_hash_key(self): hash_key = HashKey('hello') @@ -169,6 +171,170 @@ } }) + def test_global_all_index(self): + all_index = GlobalAllIndex('AllKeys', parts=[ + HashKey('username'), + 
RangeKey('date_joined') + ], + throughput={ + 'read': 6, + 'write': 2, + }) + self.assertEqual(all_index.name, 'AllKeys') + self.assertEqual([part.attr_type for part in all_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(all_index.projection_type, 'ALL') + + self.assertEqual(all_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(all_index.schema(), { + 'IndexName': 'AllKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 6, + 'WriteCapacityUnits': 2 + } + }) + + def test_global_keys_only_index(self): + keys_only = GlobalKeysOnlyIndex('KeysOnly', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], + throughput={ + 'read': 3, + 'write': 4, + }) + self.assertEqual(keys_only.name, 'KeysOnly') + self.assertEqual([part.attr_type for part in keys_only.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(keys_only.projection_type, 'KEYS_ONLY') + + self.assertEqual(keys_only.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(keys_only.schema(), { + 'IndexName': 'KeysOnly', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }) + + def test_global_include_index(self): + # Lean on the default throughput + include_index = GlobalIncludeIndex('IncludeKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], includes=[ + 'gender', + 'friend_count' + ]) + self.assertEqual(include_index.name, 'IncludeKeys') + 
self.assertEqual([part.attr_type for part in include_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(include_index.projection_type, 'INCLUDE') + + self.assertEqual(include_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(include_index.schema(), { + 'IndexName': 'IncludeKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'INCLUDE', + 'NonKeyAttributes': [ + 'gender', + 'friend_count', + ] + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + }) + + def test_global_include_index_throughput(self): + include_index = GlobalIncludeIndex('IncludeKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], includes=[ + 'gender', + 'friend_count' + ], throughput={ + 'read': 10, + 'write': 8 + }) + + self.assertEqual(include_index.schema(), { + 'IndexName': 'IncludeKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'INCLUDE', + 'NonKeyAttributes': [ + 'gender', + 'friend_count', + ] + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 8 + } + }) + class ItemTestCase(unittest.TestCase): def setUp(self): @@ -476,7 +642,7 @@ 'date_joined' ])) - def test_prepare_partial(self): + def test_prepare_partial_empty_set(self): self.johndoe.mark_clean() # Change some data. 
self.johndoe['first_name'] = 'Johann' @@ -691,11 +857,33 @@ def setUp(self): super(ResultSetTestCase, self).setUp() self.results = ResultSet() - self.results.to_call(fake_results, 'john', greeting='Hello', limit=20) + self.result_function = mock.MagicMock(side_effect=fake_results) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=20) def test_first_key(self): self.assertEqual(self.results.first_key, 'exclusive_start_key') + def test_max_page_size_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello') + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=10) + self.result_function.reset_mock() + + def test_max_page_size_and_smaller_limit_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=5) + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=5) + self.result_function.reset_mock() + + def test_max_page_size_and_bigger_limit_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=15) + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=10) + self.result_function.reset_mock() + def test_fetch_more(self): # First "page". self.results.fetch_more() @@ -707,6 +895,9 @@ 'Hello john #4', ]) + self.result_function.assert_called_with('john', greeting='Hello', limit=20) + self.result_function.reset_mock() + # Fake in a last key. self.results._last_key_seen = 4 # Second "page". @@ -719,6 +910,9 @@ 'Hello john #9', ]) + self.result_function.assert_called_with('john', greeting='Hello', limit=20, exclusive_start_key=4) + self.result_function.reset_mock() + # Fake in a last key. self.results._last_key_seen = 9 # Last "page". 
@@ -745,20 +939,20 @@ self.assertEqual(self.results.next(), 'Hello john #2') self.assertEqual(self.results.next(), 'Hello john #3') self.assertEqual(self.results.next(), 'Hello john #4') - self.assertEqual(self.results.call_kwargs['limit'], 15) + self.assertEqual(self.results._limit, 15) # Second page. self.assertEqual(self.results.next(), 'Hello john #5') self.assertEqual(self.results.next(), 'Hello john #6') self.assertEqual(self.results.next(), 'Hello john #7') self.assertEqual(self.results.next(), 'Hello john #8') self.assertEqual(self.results.next(), 'Hello john #9') - self.assertEqual(self.results.call_kwargs['limit'], 10) + self.assertEqual(self.results._limit, 10) # Third page. self.assertEqual(self.results.next(), 'Hello john #10') self.assertEqual(self.results.next(), 'Hello john #11') self.assertEqual(self.results.next(), 'Hello john #12') self.assertRaises(StopIteration, self.results.next) - self.assertEqual(self.results.call_kwargs['limit'], 7) + self.assertEqual(self.results._limit, 7) def test_limit_smaller_than_first_page(self): results = ResultSet() @@ -960,7 +1154,17 @@ "KeyType": "RANGE" } ] - schema_1 = self.users._introspect_schema(raw_schema_1) + raw_attributes_1 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'date_joined', + 'AttributeType': 'S' + }, + ] + schema_1 = self.users._introspect_schema(raw_schema_1, raw_attributes_1) self.assertEqual(len(schema_1), 2) self.assertTrue(isinstance(schema_1[0], HashKey)) self.assertEqual(schema_1[0].name, 'username') @@ -973,12 +1177,49 @@ "KeyType": "BTREE" }, ] + raw_attributes_2 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ] self.assertRaises( exceptions.UnknownSchemaFieldError, self.users._introspect_schema, - raw_schema_2 + raw_schema_2, + raw_attributes_2 ) + # Test a complex schema & ensure the types come back correctly. 
+ raw_schema_3 = [ + { + "AttributeName": "user_id", + "KeyType": "HASH" + }, + { + "AttributeName": "junk", + "KeyType": "RANGE" + } + ] + raw_attributes_3 = [ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N' + }, + { + 'AttributeName': 'junk', + 'AttributeType': 'B' + }, + ] + schema_3 = self.users._introspect_schema(raw_schema_3, raw_attributes_3) + self.assertEqual(len(schema_3), 2) + self.assertTrue(isinstance(schema_3[0], HashKey)) + self.assertEqual(schema_3[0].name, 'user_id') + self.assertEqual(schema_3[0].data_type, NUMBER) + self.assertTrue(isinstance(schema_3[1], RangeKey)) + self.assertEqual(schema_3[1].name, 'junk') + self.assertEqual(schema_3[1].data_type, BINARY) + def test__introspect_indexes(self): raw_indexes_1 = [ { @@ -1132,6 +1373,13 @@ KeysOnlyIndex('FriendCountIndex', parts=[ RangeKey('friend_count') ]), + ], global_indexes=[ + GlobalKeysOnlyIndex('FullFriendCountIndex', parts=[ + RangeKey('friend_count') + ], throughput={ + 'read': 10, + 'write': 8, + }), ], connection=conn) self.assertTrue(retval) @@ -1165,6 +1413,24 @@ 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 20 }, + global_secondary_indexes=[ + { + 'KeySchema': [ + { + 'KeyType': 'RANGE', + 'AttributeName': 'friend_count' + } + ], + 'IndexName': 'FullFriendCountIndex', + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + }, + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 8, + 'ReadCapacityUnits': 10 + } + } + ], local_secondary_indexes=[ { 'KeySchema': [ @@ -1252,10 +1518,65 @@ self.assertEqual(self.users.throughput['read'], 7) self.assertEqual(self.users.throughput['write'], 2) - mock_update.assert_called_once_with('users', { - 'WriteCapacityUnits': 2, - 'ReadCapacityUnits': 7 - }) + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=None, + provisioned_throughput={ + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 7 + } + ) + + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + 
self.assertEqual(self.users.throughput['read'], 7) + self.assertEqual(self.users.throughput['write'], 2) + self.users.update(throughput={ + 'read': 9, + 'write': 5, + }, + global_indexes={ + 'WhateverIndex': { + 'read': 6, + 'write': 1 + }, + 'AnotherIndex': { + 'read': 1, + 'write': 2 + } + }) + self.assertEqual(self.users.throughput['read'], 9) + self.assertEqual(self.users.throughput['write'], 5) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Update': { + 'IndexName': 'AnotherIndex', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 1 + } + } + }, + { + 'Update': { + 'IndexName': 'WhateverIndex', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 1, + 'ReadCapacityUnits': 6 + } + } + } + ], + provisioned_throughput={ + 'WriteCapacityUnits': 5, + 'ReadCapacityUnits': 9, + } + ) def test_delete(self): with mock.patch.object( @@ -1288,7 +1609,46 @@ mock_get_item.assert_called_once_with('users', { 'username': {'S': 'johndoe'} - }, consistent_read=False) + }, consistent_read=False, attributes_to_get=None) + + with mock.patch.object( + self.users.connection, + 'get_item', + return_value=expected) as mock_get_item: + item = self.users.get_item(username='johndoe', attributes=[ + 'username', + 'first_name', + ]) + + mock_get_item.assert_called_once_with('users', { + 'username': {'S': 'johndoe'} + }, consistent_read=False, attributes_to_get=['username', 'first_name']) + + def test_has_item(self): + expected = { + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + } + + with mock.patch.object( + self.users.connection, + 'get_item', + return_value=expected) as mock_get_item: + found = self.users.has_item(username='johndoe') + self.assertTrue(found) + + with mock.patch.object( + self.users.connection, + 'get_item') as 
mock_get_item: + mock_get_item.side_effect = JSONResponseError("Nope.", None, None) + found = self.users.has_item(username='mrsmith') + self.assertFalse(found) def test_lookup_hash(self): """Tests the "lookup" function with just a hash key""" @@ -1347,7 +1707,6 @@ username= 'johndoe', date_joined= 1366056668) - def test_put_item(self): with mock.patch.object( self.users.connection, @@ -1837,7 +2196,7 @@ mock_query.assert_called_once_with('users', consistent_read=False, - scan_index_forward=True, + scan_index_forward=False, index_name=None, attributes_to_get=None, limit=4, @@ -1847,7 +2206,9 @@ 'ComparisonOperator': 'BETWEEN', } }, - select=None + select=None, + query_filter=None, + conditional_operator=None ) # Now alter the expected. @@ -1868,7 +2229,9 @@ exclusive_start_key={ 'username': 'adam', }, - consistent=True + consistent=True, + query_filter=None, + conditional_operator='AND' ) usernames = [res['username'] for res in results['results']] self.assertEqual(usernames, ['johndoe', 'jane', 'alice', 'bob']) @@ -1884,7 +2247,7 @@ }, index_name=None, attributes_to_get=None, - scan_index_forward=True, + scan_index_forward=False, limit=4, exclusive_start_key={ 'username': { @@ -1892,7 +2255,9 @@ }, }, consistent_read=True, - select=None + select=None, + query_filter=None, + conditional_operator='AND' ) def test_private_scan(self): @@ -1953,7 +2318,9 @@ }, limit=2, segment=None, - total_segments=None + attributes_to_get=None, + total_segments=None, + conditional_operator=None ) # Now alter the expected. 
@@ -1995,7 +2362,9 @@ }, }, segment=None, - total_segments=None + attributes_to_get=None, + total_segments=None, + conditional_operator=None ) def test_query(self): @@ -2015,7 +2384,7 @@ 'last_key': 'jane', } - results = self.users.query(last_name__eq='Doe') + results = self.users.query_2(last_name__eq='Doe') self.assertTrue(isinstance(results, ResultSet)) self.assertEqual(len(results._results), 0) self.assertEqual(results.the_callable, self.users._query) @@ -2069,7 +2438,7 @@ 'last_key': 'jane', } - results = self.users.query(last_name__eq='Doe', + results = self.users.query_2(last_name__eq='Doe', attributes=['username']) self.assertTrue(isinstance(results, ResultSet)) self.assertEqual(len(results._results), 0) @@ -2147,6 +2516,38 @@ self.assertEqual(mock_scan_2.call_count, 1) + def test_scan_with_specific_attributes(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + }), + Item(self.users, data={ + 'username': 'jane', + }), + ], + 'last_key': 'jane', + } + + results = self.users.scan(attributes=['username']) + self.assertTrue(isinstance(results, ResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._scan) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_query: + res_1 = results.next() + # Now it should be populated. 
+ self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + self.assertEqual(res_1.keys(), ['username']) + res_2 = results.next() + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_query.call_count, 1) + def test_count(self): expected = { "Table": { diff -Nru python-boto-2.20.1/tests/unit/ec2/autoscale/test_group.py python-boto-2.29.1/tests/unit/ec2/autoscale/test_group.py --- python-boto-2.20.1/tests/unit/ec2/autoscale/test_group.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/autoscale/test_group.py 2014-05-30 20:49:34.000000000 +0000 @@ -33,7 +33,7 @@ from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping -from boto.ec2.autoscale import launchconfig +from boto.ec2.autoscale import launchconfig, LaunchConfiguration class TestAutoScaleGroup(AWSMockServiceTestCase): connection_class = AutoScaleConnection @@ -55,7 +55,8 @@ autoscale = AutoScalingGroup( name='foo', launch_config='lauch_config', min_size=1, max_size=2, - termination_policies=['OldestInstance', 'OldestLaunchConfiguration']) + termination_policies=['OldestInstance', 'OldestLaunchConfiguration'], + instance_id='test-id') self.service_connection.create_auto_scaling_group(autoscale) self.assert_request_parameters({ 'Action': 'CreateAutoScalingGroup', @@ -65,8 +66,45 @@ 'MinSize': 1, 'TerminationPolicies.member.1': 'OldestInstance', 'TerminationPolicies.member.2': 'OldestLaunchConfiguration', + 'InstanceId': 'test-id', }, ignore_params_values=['Version']) + def test_autoscaling_group_single_vpc_zone_identifier(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier='vpc_zone_1') + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1', + }, ignore_params_values=['MaxSize', 'MinSize', 
'LaunchConfigurationName', 'Version']) + + def test_autoscaling_group_vpc_zone_identifier_list(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier=['vpc_zone_1', 'vpc_zone_2']) + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2', + }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version']) + + def test_autoscaling_group_vpc_zone_identifier_multi(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier='vpc_zone_1,vpc_zone_2') + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2', + }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version']) + class TestAutoScaleGroupHonorCooldown(AWSMockServiceTestCase): connection_class = AutoScaleConnection @@ -167,6 +205,7 @@ OldestLaunchConfiguration 2 + Something @@ -192,6 +231,7 @@ self.assertEqual(as_group.tags, []) self.assertEqual(as_group.termination_policies, ['OldestInstance', 'OldestLaunchConfiguration']) + self.assertEqual(as_group.instance_id, 'Something') class TestDescribeTerminationPolicies(AWSMockServiceTestCase): @@ -223,6 +263,74 @@ ['ClosestToNextInstanceHour', 'Default', 'NewestInstance', 'OldestInstance', 'OldestLaunchConfiguration']) +class TestLaunchConfigurationDescribe(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + # This is a dummy response + return """ + + + + + true + + 2013-01-21T23:04:42.200Z + + my-test-lc + + m1.small + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + 
+ ami-514ac838 + + + + true + + false + + + + + d05a22f8-b690-11e2-bf8e-2113fEXAMPLE + + + """ + + def test_get_all_launch_configurations(self): + self.set_http_response(status_code=200) + + response = self.service_connection.get_all_launch_configurations() + self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], LaunchConfiguration)) + + self.assertEqual(response[0].associate_public_ip_address, True) + self.assertEqual(response[0].name, "my-test-lc") + self.assertEqual(response[0].instance_type, "m1.small") + self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc") + self.assertEqual(response[0].image_id, "ami-514ac838") + self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring)) + self.assertEqual(response[0].instance_monitoring.enabled, 'true') + self.assertEqual(response[0].ebs_optimized, False) + self.assertEqual(response[0].block_device_mappings, []) + + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + }, ignore_params_values=['Version']) + + def test_get_all_configuration_limited(self): + self.set_http_response(status_code=200) + + response = self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"]) + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + 'MaxRecords': 10, + 'LaunchConfigurationNames.member.1': 'my-test1', + 'LaunchConfigurationNames.member.2': 'my-test2' + }, ignore_params_values=['Version']) + class TestLaunchConfiguration(AWSMockServiceTestCase): connection_class = AutoScaleConnection @@ -251,7 +359,10 @@ security_groups = ['group1', 'group2'], spot_price='price', block_device_mappings = [bdm], - associate_public_ip_address = True + associate_public_ip_address = True, + volume_type='atype', + 
delete_on_termination=False, + iops=3000 ) response = self.service_connection.create_launch_configuration(lc) @@ -272,7 +383,10 @@ 'SecurityGroups.member.1': 'group1', 'SecurityGroups.member.2': 'group2', 'SpotPrice': 'price', - 'AssociatePublicIpAddress' : 'true' + 'AssociatePublicIpAddress' : 'true', + 'VolumeType': 'atype', + 'DeleteOnTermination': 'false', + 'Iops': 3000, }, ignore_params_values=['Version']) @@ -441,7 +555,7 @@ resource_type='auto-scaling-group', propagate_at_launch=False )] - + response = self.service_connection.create_or_update_tags(tags) @@ -467,8 +581,8 @@ ('ResourceId', 'sg-01234567', 'resource_id'), ('PropagateAtLaunch', 'true', 'propagate_at_launch')]: self.check_tag_attributes_set(i[0], i[1], i[2]) - - + + def check_tag_attributes_set(self, name, value, attr): tag = Tag() tag.endElement(name, value, None) @@ -477,5 +591,214 @@ else: self.assertEqual(getattr(tag, attr), value) + +class TestAttachInstances(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestAttachInstances, self).setUp() + + def default_body(self): + return """ + + + requestid + + + """ + + def test_attach_instances(self): + self.set_http_response(status_code=200) + self.service_connection.attach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'] + ) + self.assert_request_parameters({ + 'Action': 'AttachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + }, ignore_params_values=['Version']) + + +class TestGetAccountLimits(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestGetAccountLimits, self).setUp() + + def default_body(self): + return """ + + 6 + 3 + + requestid + + + """ + + def test_autoscaling_group_put_notification_configuration(self): + self.set_http_response(status_code=200) + limits = self.service_connection.get_account_limits() + 
self.assert_request_parameters({ + 'Action': 'DescribeAccountLimits', + }, ignore_params_values=['Version']) + self.assertEqual(limits.max_autoscaling_groups, 6) + self.assertEqual(limits.max_launch_configurations, 3) + +class TestGetAdjustmentTypes(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestGetAdjustmentTypes, self).setUp() + + def default_body(self): + return """ + + + + + ChangeInCapacity + + + ExactCapacity + + + PercentChangeInCapacity + + + + + requestId + + + """ + def test_autoscaling_adjustment_types(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_adjustment_types() + self.assert_request_parameters({ + 'Action': 'DescribeAdjustmentTypes' + }, ignore_params_values=['Version']) + + self.assertTrue(isinstance(response, list)) + self.assertEqual(response[0].adjustment_type, "ChangeInCapacity") + self.assertEqual(response[1].adjustment_type, "ExactCapacity") + self.assertEqual(response[2].adjustment_type, "PercentChangeInCapacity") + + +class TestLaunchConfigurationDescribeWithBlockDeviceTypes(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + # This is a dummy response + return """ + + + + + true + + 2013-01-21T23:04:42.200Z + + my-test-lc + + m1.small + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + + + /dev/xvdp + + snap-1234abcd + 1000 + true + io1 + 100 + + + + ephemeral1 + /dev/xvdc + + + ephemeral0 + /dev/xvdb + + + /dev/xvdh + + 2000 + false + io1 + 200 + + + + ami-514ac838 + + + + true + + false + + + + + d05a22f8-b690-11e2-bf8e-2113fEXAMPLE + + + """ + + def test_get_all_launch_configurations_with_block_device_types(self): + self.set_http_response(status_code=200) + self.service_connection.use_block_device_types = True + + response = self.service_connection.get_all_launch_configurations() + 
self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], LaunchConfiguration)) + + self.assertEqual(response[0].associate_public_ip_address, True) + self.assertEqual(response[0].name, "my-test-lc") + self.assertEqual(response[0].instance_type, "m1.small") + self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc") + self.assertEqual(response[0].image_id, "ami-514ac838") + self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring)) + self.assertEqual(response[0].instance_monitoring.enabled, 'true') + self.assertEqual(response[0].ebs_optimized, False) + + self.assertEqual(response[0].block_device_mappings['/dev/xvdb'].ephemeral_name, 'ephemeral0') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdc'].ephemeral_name, 'ephemeral1') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].snapshot_id, 'snap-1234abcd') + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].delete_on_termination, True) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].iops, 1000) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].size, 100) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].volume_type, 'io1') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].delete_on_termination, False) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].iops, 2000) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].size, 200) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].volume_type, 'io1') + + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + }, ignore_params_values=['Version']) + + def test_get_all_configuration_limited(self): + self.set_http_response(status_code=200) + + response = 
self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"]) + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + 'MaxRecords': 10, + 'LaunchConfigurationNames.member.1': 'my-test1', + 'LaunchConfigurationNames.member.2': 'my-test2' + }, ignore_params_values=['Version']) + + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/ec2/test_address.py python-boto-2.29.1/tests/unit/ec2/test_address.py --- python-boto-2.20.1/tests/unit/ec2/test_address.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_address.py 2014-05-30 20:49:34.000000000 +0000 @@ -35,6 +35,54 @@ self.address.connection.associate_address.assert_called_with( 1, "192.168.1.1", + allow_reassociation=False, + dry_run=False + ) + + def test_disassociate_calls_connection_disassociate_address_with_correct_args(self): + self.address.disassociate() + self.address.connection.disassociate_address.assert_called_with( + "192.168.1.1", + dry_run=False + ) + + +class AddressWithAllocationTest(unittest.TestCase): + def setUp(self): + self.address = Address() + self.address.connection = mock.Mock() + self.address.public_ip = "192.168.1.1" + self.address.allocation_id = "aid1" + + def check_that_attribute_has_been_set(self, name, value, attribute): + self.address.endElement(name, value, None) + self.assertEqual(getattr(self.address, attribute), value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("publicIp", "192.168.1.1", "public_ip"), + ("instanceId", 1, "instance_id"), + ("domain", "some domain", "domain"), + ("allocationId", 1, "allocation_id"), + ("associationId", 1, "association_id"), + ("somethingRandom", "somethingRandom", "somethingRandom")]: + self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) + + + def test_release_calls_connection_release_address_with_correct_args(self): + self.address.release() + 
self.address.connection.release_address.assert_called_with( + None, + "aid1", + dry_run=False + ) + + def test_associate_calls_connection_associate_address_with_correct_args(self): + self.address.associate(1) + self.address.connection.associate_address.assert_called_with( + 1, + "192.168.1.1", + allocation_id="aid1", + allow_reassociation=False, dry_run=False ) diff -Nru python-boto-2.20.1/tests/unit/ec2/test_blockdevicemapping.py python-boto-2.29.1/tests/unit/ec2/test_blockdevicemapping.py --- python-boto-2.20.1/tests/unit/ec2/test_blockdevicemapping.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_blockdevicemapping.py 2014-05-30 20:49:34.000000000 +0000 @@ -65,7 +65,7 @@ retval = self.block_device_mapping.startElement("virtualName", None, None) assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping)) - def test_endElement_with_name_device_sets_current_name(self): + def test_endElement_with_name_device_sets_current_name_dev_null(self): self.block_device_mapping.endElement("device", "/dev/null", None) self.assertEqual(self.block_device_mapping.current_name, "/dev/null") @@ -96,7 +96,7 @@ # Autoscaling). 
self.set_http_response(status_code=200) dev_sdf = BlockDeviceType(snapshot_id='snap-12345') - dev_sdg = BlockDeviceType(snapshot_id='snap-12346') + dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=True) bdm = BlockDeviceMapping() bdm['/dev/sdf'] = dev_sdf @@ -115,7 +115,7 @@ 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMapping.2.DeviceName': '/dev/sdg', - 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMapping.2.Ebs.SnapshotId': 'snap-12346', 'ImageId': '123456', 'InstanceType': 'm1.large', diff -Nru python-boto-2.20.1/tests/unit/ec2/test_connection.py python-boto-2.29.1/tests/unit/ec2/test_connection.py --- python-boto-2.20.1/tests/unit/ec2/test_connection.py 2014-07-25 19:29:13.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -2,7 +2,7 @@ import httplib from datetime import datetime, timedelta -from mock import MagicMock, Mock, patch +from mock import MagicMock, Mock from tests.unit import unittest from tests.unit import AWSMockServiceTestCase @@ -968,7 +968,9 @@ """ snaps = [] - # Generate some dates offset by days, weeks, months + # Generate some dates offset by days, weeks, months. + # This is to validate the various types of snapshot logic handled by + # ``trim_snapshots``. 
now = datetime.now() dates = [ now, @@ -1234,6 +1236,62 @@ 'Version' ]) + def test_sriov_net_support_simple(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + image_location='s3://foo', + sriov_net_support='simple') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'ImageLocation': 's3://foo', + 'Name': 'name', + 'Description': 'description', + 'SriovNetSupport': 'simple' + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + def test_volume_delete_on_termination_on(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678', + delete_root_volume_on_termination=True) + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'true', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + + def test_volume_delete_on_termination_default(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'false', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + class TestTerminateInstances(TestEC2ConnectionBase): def default_body(self): @@ -1277,6 +1335,18 @@ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Timestamp', 'Version']) + self.ec2.get_all_reservations() + 
self.assert_request_parameters({ + 'Action': 'DescribeInstances'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + self.ec2.get_only_instances() + self.assert_request_parameters({ + 'Action': 'DescribeInstances'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + def test_max_results(self): self.set_http_response(status_code=200) self.ec2.get_all_instances( @@ -1288,6 +1358,16 @@ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Timestamp', 'Version']) + def test_next_token(self): + self.set_http_response(status_code=200) + self.ec2.get_all_reservations( + next_token='abcdefgh', + ) + self.assert_request_parameters({ + 'Action': 'DescribeInstances', + 'NextToken': 'abcdefgh'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) class TestDescribeTags(TestEC2ConnectionBase): @@ -1317,6 +1397,227 @@ 'SignatureVersion', 'Timestamp', 'Version']) +class TestSignatureAlteration(TestEC2ConnectionBase): + def test_unchanged(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['ec2'] + ) + + def test_switched(self): + region = RegionInfo( + name='cn-north-1', + endpoint='ec2.cn-north-1.amazonaws.com.cn', + connection_cls=EC2Connection + ) + + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + region=region + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4'] + ) + + +class TestAssociateAddress(TestEC2ConnectionBase): + def default_body(self): + return """ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + eipassoc-fc5ca095 + + """ + + def test_associate_address(self): + self.set_http_response(status_code=200) + result = self.ec2.associate_address(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual(True, result) + + def test_associate_address_object(self): + 
self.set_http_response(status_code=200) + result = self.ec2.associate_address_object(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual('eipassoc-fc5ca095', result.association_id) + + +class TestAssociateAddressFail(TestEC2ConnectionBase): + def default_body(self): + return """ + + + + InvalidInstanceID.NotFound + The instance ID 'i-4cbc822a' does not exist + + + ea966190-f9aa-478e-9ede-cb5432daacc0 + Failure + + """ + + def test_associate_address(self): + self.set_http_response(status_code=200) + result = self.ec2.associate_address(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual(False, result) + + +class TestDescribeVolumes(TestEC2ConnectionBase): + def default_body(self): + return """ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + vol-1a2b3c4d + 80 + + us-east-1a + in-use + YYYY-MM-DDTHH:MM:SS.SSSZ + + + vol-1a2b3c4d + i-1a2b3c4d + /dev/sdh + attached + YYYY-MM-DDTHH:MM:SS.SSSZ + false + + + standard + true + + + vol-5e6f7a8b + 80 + + us-east-1a + in-use + YYYY-MM-DDTHH:MM:SS.SSSZ + + + vol-5e6f7a8b + i-5e6f7a8b + /dev/sdz + attached + YYYY-MM-DDTHH:MM:SS.SSSZ + false + + + standard + false + + + + """ + + def test_get_all_volumes(self): + self.set_http_response(status_code=200) + result = self.ec2.get_all_volumes(volume_ids=['vol-1a2b3c4d', 'vol-5e6f7a8b']) + self.assert_request_parameters({ + 'Action': 'DescribeVolumes', + 'VolumeId.1': 'vol-1a2b3c4d', + 'VolumeId.2': 'vol-5e6f7a8b'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(result), 2) + self.assertEqual(result[0].id, 'vol-1a2b3c4d') + self.assertTrue(result[0].encrypted) + self.assertEqual(result[1].id, 'vol-5e6f7a8b') + self.assertFalse(result[1].encrypted) + + +class TestDescribeSnapshots(TestEC2ConnectionBase): + def default_body(self): + return """ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + snap-1a2b3c4d + vol-1a2b3c4d + pending + YYYY-MM-DDTHH:MM:SS.SSSZ + 80% + 111122223333 + 
15 + Daily Backup + + true + + + + + snap-5e6f7a8b + vol-5e6f7a8b + completed + YYYY-MM-DDTHH:MM:SS.SSSZ + 100% + 111122223333 + 15 + Daily Backup + + false + + + + """ + + def test_get_all_snapshots(self): + self.set_http_response(status_code=200) + result = self.ec2.get_all_snapshots(snapshot_ids=['snap-1a2b3c4d', 'snap-5e6f7a8b']) + self.assert_request_parameters({ + 'Action': 'DescribeSnapshots', + 'SnapshotId.1': 'snap-1a2b3c4d', + 'SnapshotId.2': 'snap-5e6f7a8b'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(result), 2) + self.assertEqual(result[0].id, 'snap-1a2b3c4d') + self.assertTrue(result[0].encrypted) + self.assertEqual(result[1].id, 'snap-5e6f7a8b') + self.assertFalse(result[1].encrypted) + + +class TestCreateVolume(TestEC2ConnectionBase): + def default_body(self): + return """ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + vol-1a2b3c4d + 80 + + us-east-1a + creating + YYYY-MM-DDTHH:MM:SS.000Z + standard + true + + """ + + def test_create_volume(self): + self.set_http_response(status_code=200) + result = self.ec2.create_volume(80, 'us-east-1e', snapshot='snap-1a2b3c4d', + encrypted=True) + self.assert_request_parameters({ + 'Action': 'CreateVolume', + 'AvailabilityZone': 'us-east-1e', + 'Size': 80, + 'SnapshotId': 'snap-1a2b3c4d', + 'Encrypted': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(result.id, 'vol-1a2b3c4d') + self.assertTrue(result.encrypted) if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/ec2/test_instancetype.py python-boto-2.29.1/tests/unit/ec2/test_instancetype.py --- python-boto-2.20.1/tests/unit/ec2/test_instancetype.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_instancetype.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,120 @@ +#!/usr/bin/env python +import httplib + +from tests.unit import 
unittest +from tests.unit import AWSMockServiceTestCase + +import boto.ec2 + +from boto.ec2.instancetype import InstanceType +from boto.ec2.connection import EC2Connection + +class TestEC2ConnectionBase(AWSMockServiceTestCase): + connection_class = EC2Connection + + def setUp(self): + super(TestEC2ConnectionBase, self).setUp() + self.ec2 = self.service_connection + + +class TestReservedInstanceOfferings(TestEC2ConnectionBase): + + def default_body(self): + return """ + + + + + m1.small15256 + + + + t1.micro15256 + + + + m1.medium110512 + + + + c1.medium210512 + + + + m1.large210512 + + + + m1.xlarge2101024 + + + + c1.xlarge2102048 + + + + m2.xlarge2102048 + + + + m3.xlarge4152048 + + + + m2.2xlarge2304096 + + + + m3.2xlarge4304096 + + + + cc1.4xlarge8603072 + + + + m2.4xlarge8604096 + + + + hi1.4xlarge81206144 + + + + cc2.8xlarge161206144 + + + + cg1.4xlarge1620012288 + + + + cr1.8xlarge1624016384 + + + + hs1.8xlarge4824000119808 + + + + + """ + + def test_get_instance_types(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_instance_types() + self.assertEqual(len(response), 18) + instance_type = response[0] + self.assertEqual(instance_type.name, 'm1.small') + self.assertEqual(instance_type.cores, '1') + self.assertEqual(instance_type.disk, '5') + self.assertEqual(instance_type.memory, '256') + instance_type = response[17] + self.assertEqual(instance_type.name, 'hs1.8xlarge') + self.assertEqual(instance_type.cores, '48') + self.assertEqual(instance_type.disk, '24000') + self.assertEqual(instance_type.memory, '119808') + + +if __name__ == '__main__': + unittest.main() diff -Nru python-boto-2.20.1/tests/unit/ec2/test_snapshot.py python-boto-2.29.1/tests/unit/ec2/test_snapshot.py --- python-boto-2.20.1/tests/unit/ec2/test_snapshot.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_snapshot.py 2014-05-30 20:49:34.000000000 +0000 @@ -28,12 +28,13 @@ demo_db_14_backup + false """ - def 
test_cancel_spot_instance_requests(self): + def test_describe_snapshots(self): self.set_http_response(status_code=200) response = self.service_connection.get_all_snapshots(['snap-1a2b3c4d', 'snap-9f8e7d6c'], owner=['self', '111122223333'], diff -Nru python-boto-2.20.1/tests/unit/ec2/test_spotinstance.py python-boto-2.29.1/tests/unit/ec2/test_spotinstance.py --- python-boto-2.20.1/tests/unit/ec2/test_spotinstance.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_spotinstance.py 2014-05-30 20:49:34.000000000 +0000 @@ -40,3 +40,71 @@ self.assertEqual(response[0].state, 'cancelled') self.assertEqual(response[1].id, 'sir-9f8e7d6c') self.assertEqual(response[1].state, 'cancelled') + + +class TestGetSpotPriceHistory(AWSMockServiceTestCase): + + connection_class = EC2Connection + + def default_body(self): + return """ + + b6c6978c-bd13-4ad7-9bc8-6f0ac9d32bcc + + + c3.large + Linux/UNIX + 0.032000 + 2013-12-28T12:17:43.000Z + us-west-2c + + + c3.large + Windows (Amazon VPC) + 0.104000 + 2013-12-28T07:49:40.000Z + us-west-2b + + + q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl + + """ + + def test_get_spot_price_history(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_spot_price_history( + instance_type='c3.large') + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'InstanceType': 'c3.large'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(response), 2) + self.assertEqual(response.next_token, + 'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl') + self.assertEqual(response.nextToken, + 'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl') + self.assertEqual(response[0].instance_type, 'c3.large') + self.assertEqual(response[0].availability_zone, 'us-west-2c') + self.assertEqual(response[1].instance_type, 'c3.large') + 
self.assertEqual(response[1].availability_zone, 'us-west-2b') + + response = self.service_connection.get_spot_price_history( + filters={'instance-type': 'c3.large'}) + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'Filter.1.Name': 'instance-type', + 'Filter.1.Value.1': 'c3.large'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + response = self.service_connection.get_spot_price_history( + next_token='foobar') + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'NextToken': 'foobar'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) diff -Nru python-boto-2.20.1/tests/unit/ec2/test_volume.py python-boto-2.29.1/tests/unit/ec2/test_volume.py --- python-boto-2.20.1/tests/unit/ec2/test_volume.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ec2/test_volume.py 2014-05-30 20:49:34.000000000 +0000 @@ -39,7 +39,6 @@ volume = Volume() volume.startElement("some name", "some attrs", None) startElement.assert_called_with( - volume, "some name", "some attrs", None @@ -79,10 +78,11 @@ retval = volume.startElement("not tagSet or attachmentSet", None, None) self.assertEqual(retval, None) - def check_that_attribute_has_been_set(self, name, value, attribute): + def check_that_attribute_has_been_set(self, name, value, attribute, obj_value=None): volume = Volume() volume.endElement(name, value, None) - self.assertEqual(getattr(volume, attribute), value) + expected_value = obj_value if obj_value is not None else value + self.assertEqual(getattr(volume, attribute), expected_value) def test_endElement_sets_correct_attributes_with_values(self): for arguments in [("volumeId", "some value", "id"), @@ -91,8 +91,9 @@ ("size", 5, "size"), ("snapshotId", 1, "snapshot_id"), ("availabilityZone", "some zone", "zone"), - ("someName", "some value", "someName")]: - 
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) + ("someName", "some value", "someName"), + ("encrypted", "true", "encrypted", True)]: + self.check_that_attribute_has_been_set(*arguments) def test_endElement_with_name_status_and_empty_string_value_doesnt_set_status(self): volume = Volume() diff -Nru python-boto-2.20.1/tests/unit/ecs/test_connection.py python-boto-2.29.1/tests/unit/ecs/test_connection.py --- python-boto-2.20.1/tests/unit/ecs/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ecs/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from tests.unit import unittest +from boto.ecs import ECSConnection +from tests.unit import AWSMockServiceTestCase + + +class TestECSConnection(AWSMockServiceTestCase): + connection_class = ECSConnection + + def default_body(self): + return """ + + + True + + B00008OE6I + + + + B00008OE6I + + Canon + Photography + Canon PowerShot S400 4MP Digital Camera w/ 3x Optical Zoom + + + + """ + + def test_item_lookup(self): + self.set_http_response(status_code=200) + item_set = self.service_connection.item_lookup( + ItemId='0316067938', + ResponseGroup='Reviews' + ) + + self.assert_request_parameters( + {'ItemId': '0316067938', + 'Operation': 'ItemLookup', + 'ResponseGroup': 'Reviews', + 'Service': 'AWSECommerceService'}, + ignore_params_values=['Version', 'AWSAccessKeyId', + 'SignatureMethod', 'SignatureVersion', + 'Timestamp']) + + items = list(item_set) + self.assertEqual(len(items), 1) + self.assertTrue(item_set.is_valid) + self.assertEqual(items[0].ASIN, 'B00008OE6I') diff -Nru python-boto-2.20.1/tests/unit/elasticache/test_api_interface.py python-boto-2.29.1/tests/unit/elasticache/test_api_interface.py --- python-boto-2.20.1/tests/unit/elasticache/test_api_interface.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/elasticache/test_api_interface.py 2014-05-30 20:49:34.000000000 +0000 @@ -15,6 +15,6 @@ 'Action': 'CreateCacheCluster', 'CacheClusterId': name, }, ignore_params_values=[ - 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', - 'Timestamp', 'ContentType', + 'Version', + 'ContentType', ]) diff -Nru python-boto-2.20.1/tests/unit/emr/test_connection.py python-boto-2.29.1/tests/unit/emr/test_connection.py --- python-boto-2.20.1/tests/unit/emr/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/emr/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -24,10 +24,16 @@ import boto.utils from datetime import datetime +from time import time from tests.unit import 
AWSMockServiceTestCase from boto.emr.connection import EmrConnection -from boto.emr.emrobject import JobFlowStepList +from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \ + ClusterStatus, ClusterSummaryList, \ + ClusterSummary, ClusterTimeline, InstanceInfo, \ + InstanceList, InstanceGroupInfo, \ + InstanceGroup, InstanceGroupList, JobFlow, \ + JobFlowStepList, Step, StepSummaryList, Cluster # These tests are just checking the basic structure of # the Elastic MapReduce code, by picking a few calls @@ -38,7 +44,49 @@ connection_class = EmrConnection def default_body(self): - return """""" + return """ + + + + + j-aaaaaaaaaaaa + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + analytics test + + + j-aaaaaaaaaaaab + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-21T02:53:08Z + 2014-01-21T02:56:40Z + 2014-01-21T03:40:22Z + + + test job + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ def test_list_clusters(self): self.set_http_response(status_code=200) @@ -49,6 +97,21 @@ 'Version': '2009-03-31', }) + self.assertTrue(isinstance(response, ClusterSummaryList)) + + self.assertEqual(len(response.clusters), 2) + self.assertTrue(isinstance(response.clusters[0], ClusterSummary)) + self.assertEqual(response.clusters[0].name, 'analytics test') + + self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus)) + + self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline)) + + self.assertEqual(response.clusters[0].status.timeline.creationdatetime, '2014-01-24T01:21:21Z') + self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z') + self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z') + + def test_list_clusters_created_before(self): self.set_http_response(status_code=200) @@ -92,7 +155,60 @@ connection_class = EmrConnection def 
default_body(self): - return """""" + return """ + + + + + ig-aaaaaaaaaaaaa + m1.large + ON_DEMAND + + + Job flow terminated + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:08Z + 2014-01-24T02:19:46Z + + + Master instance group + 1 + 0 + MASTER + + + ig-aaaaaaaaaaab + m1.large + ON_DEMAND + + + Job flow terminated + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + Core instance group + 2 + 0 + CORE + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + +""" def test_list_instance_groups(self): self.set_http_response(200) @@ -108,11 +224,96 @@ 'Version': '2009-03-31' }) + self.assertTrue(isinstance(response, InstanceGroupList)) + self.assertEqual(len(response.instancegroups), 2) + self.assertTrue(isinstance(response.instancegroups[0], InstanceGroupInfo)) + self.assertEqual(response.instancegroups[0].id, 'ig-aaaaaaaaaaaaa') + self.assertEqual(response.instancegroups[0].instancegrouptype, "MASTER") + self.assertEqual(response.instancegroups[0].instancetype, "m1.large") + self.assertEqual(response.instancegroups[0].market, "ON_DEMAND") + self.assertEqual(response.instancegroups[0].name, "Master instance group") + self.assertEqual(response.instancegroups[0].requestedinstancecount, '1') + self.assertEqual(response.instancegroups[0].runninginstancecount, '0') + self.assertTrue(isinstance(response.instancegroups[0].status, ClusterStatus)) + self.assertEqual(response.instancegroups[0].status.state, 'TERMINATED') + # status.statechangereason is not parsed into an object + #self.assertEqual(response.instancegroups[0].status.statechangereason.code, 'CLUSTER_TERMINATED') + class TestListInstances(AWSMockServiceTestCase): connection_class = EmrConnection def default_body(self): - return """""" + return """ + + + + + ci-123456789abc + + + Cluster was terminated. 
+ CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:26Z + 2014-01-24T01:25:25Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-60.us-west-1.compute.internal + 54.0.0.1 + ec2-54-0-0-1.us-west-1.compute.amazonaws.com + i-aaaaaaaa + 10.0.0.60 + + + ci-123456789abd + + + Cluster was terminated. + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:26Z + 2014-01-24T01:25:25Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-61.us-west-1.compute.internal + 54.0.0.2 + ec2-54-0-0-2.us-west-1.compute.amazonaws.com + i-aaaaaaab + 10.0.0.61 + + + ci-123456789abe3 + + + Cluster was terminated. + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:33Z + 2014-01-24T01:25:08Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-62.us-west-1.compute.internal + 54.0.0.3 + ec2-54-0-0-3.us-west-1.compute.amazonaws.com + i-aaaaaaac + 10.0.0.62 + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ def test_list_instances(self): self.set_http_response(200) @@ -121,6 +322,16 @@ self.service_connection.list_instances() response = self.service_connection.list_instances(cluster_id='j-123') + self.assertTrue(isinstance(response, InstanceList)) + self.assertEqual(len(response.instances), 3) + self.assertTrue(isinstance(response.instances[0], InstanceInfo)) + self.assertEqual(response.instances[0].ec2instanceid, 'i-aaaaaaaa') + self.assertEqual(response.instances[0].id, 'ci-123456789abc') + self.assertEqual(response.instances[0].privatednsname , 'ip-10-0-0-60.us-west-1.compute.internal') + self.assertEqual(response.instances[0].privateipaddress , '10.0.0.60') + self.assertEqual(response.instances[0].publicdnsname , 'ec2-54-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(response.instances[0].publicipaddress , '54.0.0.1') + self.assert_request_parameters({ 'Action': 'ListInstances', @@ -162,7 +373,7 @@ connection_class = EmrConnection def default_body(self): - return """""" + return """Step 1""" def test_list_steps(self): self.set_http_response(200) @@ -177,6 +388,8 @@ 'ClusterId': 'j-123', 'Version': 
'2009-03-31' }) + self.assertTrue(isinstance(response, StepSummaryList)) + self.assertEqual(response.steps[0].name, 'Step 1') def test_list_steps_with_states(self): self.set_http_response(200) @@ -193,7 +406,8 @@ 'StepStateList.member.2': 'FAILED', 'Version': '2009-03-31' }) - + self.assertTrue(isinstance(response, StepSummaryList)) + self.assertEqual(response.steps[0].name, 'Step 1') class TestListBootstrapActions(AWSMockServiceTestCase): connection_class = EmrConnection @@ -220,7 +434,47 @@ connection_class = EmrConnection def default_body(self): - return """""" + return """ + + + + j-aaaaaaaaa + + + us-west-1c + my_secret_key + + 2.4.2 + true + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + false + test analytics + 2.4.2 + + + hadoop + 1.0.3 + + + false + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ def test_describe_cluster(self): self.set_http_response(200) @@ -230,6 +484,20 @@ response = self.service_connection.describe_cluster(cluster_id='j-123') + self.assertTrue(isinstance(response, Cluster)) + self.assertEqual(response.id, 'j-aaaaaaaaa') + self.assertEqual(response.runningamiversion, '2.4.2') + self.assertEqual(response.visibletoallusers, 'true') + self.assertEqual(response.autoterminate, 'false') + self.assertEqual(response.name, 'test analytics') + self.assertEqual(response.requestedamiversion, '2.4.2') + self.assertEqual(response.terminationprotected, 'false') + self.assertEqual(response.ec2instanceattributes.ec2availabilityzone, "us-west-1c") + self.assertEqual(response.ec2instanceattributes.ec2keyname, 'my_secret_key') + self.assertEqual(response.status.state, 'TERMINATED') + self.assertEqual(response.applications[0].name, 'hadoop') + self.assertEqual(response.applications[0].version, '1.0.3') + self.assert_request_parameters({ 'Action': 'DescribeCluster', 'ClusterId': 'j-123', @@ -291,3 +559,310 @@ self.assertTrue(isinstance(response, JobFlowStepList)) 
self.assertEqual(response.stepids[0].value, 'Foo') self.assertEqual(response.stepids[1].value, 'Bar') + + +class TestBuildTagList(AWSMockServiceTestCase): + connection_class = EmrConnection + + def test_key_without_value_encoding(self): + input_dict = { + 'KeyWithNoValue': '', + 'AnotherKeyWithNoValue': None + } + res = self.service_connection._build_tag_list(input_dict) + # Keys are outputted in ascending key order. + expected = { + 'Tags.member.1.Key': 'AnotherKeyWithNoValue', + 'Tags.member.2.Key': 'KeyWithNoValue' + } + self.assertEqual(expected, res) + + def test_key_full_key_value_encoding(self): + input_dict = { + 'FirstKey': 'One', + 'SecondKey': 'Two' + } + res = self.service_connection._build_tag_list(input_dict) + # Keys are outputted in ascending key order. + expected = { + 'Tags.member.1.Key': 'FirstKey', + 'Tags.member.1.Value': 'One', + 'Tags.member.2.Key': 'SecondKey', + 'Tags.member.2.Value': 'Two' + } + self.assertEqual(expected, res) + + +class TestAddTag(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return """ + + + 88888888-8888-8888-8888-888888888888 + + + """ + + def test_add_mix_of_tags_with_without_values(self): + input_tags = { + 'FirstKey': 'One', + 'SecondKey': 'Two', + 'ZzzNoValue': '' + } + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.add_tags() + + with self.assertRaises(TypeError): + self.service_connection.add_tags('j-123') + + with self.assertRaises(AssertionError): + self.service_connection.add_tags('j-123', []) + + response = self.service_connection.add_tags('j-123', input_tags) + + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'AddTags', + 'ResourceId': 'j-123', + 'Tags.member.1.Key': 'FirstKey', + 'Tags.member.1.Value': 'One', + 'Tags.member.2.Key': 'SecondKey', + 'Tags.member.2.Value': 'Two', + 'Tags.member.3.Key': 'ZzzNoValue', + 'Version': '2009-03-31' + }) + + +class TestRemoveTag(AWSMockServiceTestCase): + 
connection_class = EmrConnection + + def default_body(self): + return """ + + + 88888888-8888-8888-8888-888888888888 + + + """ + + def test_remove_tags(self): + input_tags = { + 'FirstKey': 'One', + 'SecondKey': 'Two', + 'ZzzNoValue': '' + } + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.add_tags() + + with self.assertRaises(TypeError): + self.service_connection.add_tags('j-123') + + with self.assertRaises(AssertionError): + self.service_connection.add_tags('j-123', []) + + response = self.service_connection.remove_tags('j-123', ['FirstKey', 'SecondKey']) + + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'RemoveTags', + 'ResourceId': 'j-123', + 'TagKeys.member.1': 'FirstKey', + 'TagKeys.member.2': 'SecondKey', + 'Version': '2009-03-31' + }) + +class DescribeJobFlowsTestBase(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return """ + + + + + 2.4.2 + + 2014-01-24T01:21:21Z + Terminated by user request + 2014-01-24T01:25:26Z + 2014-01-24T01:25:26Z + TERMINATED + 2014-01-24T02:19:46Z + + + true + + test analytics + j-aaaaaa + + + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + COMPLETED + 2014-01-24T01:26:08Z + + + + + s3://us-west-1.elasticmapreduce/libs/hive/hive-script + --base-path + s3://us-west-1.elasticmapreduce/libs/hive/ + --install-hive + --hive-versions + 0.11.0.1 + + s3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar + + + Setup hive + TERMINATE_JOB_FLOW + + + + + + us-west-1c + + m1.large + my_key + true + + + 2014-01-24T01:21:21Z + 0 + 2014-01-24T01:23:56Z + 2014-01-24T01:25:08Z + ENDED + 2014-01-24T02:19:46Z + 1 + m1.large + Job flow terminated + ON_DEMAND + ig-aaaaaa + MASTER + Master instance group + + + 2014-01-24T01:21:21Z + 0 + 2014-01-24T01:25:26Z + 2014-01-24T01:25:26Z + ENDED + 2014-01-24T02:19:46Z + 2 + m1.large + Job flow terminated + ON_DEMAND + ig-aaaaab + CORE + Core instance group + + + m1.large + i-aaaaaa 
+ 1.0.3 + 12 + ec2-184-0-0-1.us-west-1.compute.amazonaws.com + 3 + false + + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ + +class TestDescribeJobFlows(DescribeJobFlowsTestBase): + + def test_describe_jobflows_response(self): + self.set_http_response(200) + + response = self.service_connection.describe_jobflows() + self.assertTrue(isinstance(response, list)) + + jf = response[0] + self.assertTrue(isinstance(jf, JobFlow)) + self.assertEqual(jf.amiversion, '2.4.2') + self.assertEqual(jf.visibletoallusers, 'true') + self.assertEqual(jf.name, 'test analytics') + self.assertEqual(jf.jobflowid, 'j-aaaaaa') + self.assertEqual(jf.ec2keyname, 'my_key') + self.assertEqual(jf.masterinstancetype, 'm1.large') + self.assertEqual(jf.availabilityzone, 'us-west-1c') + self.assertEqual(jf.keepjobflowalivewhennosteps, 'true') + self.assertEqual(jf.slaveinstancetype, 'm1.large') + self.assertEqual(jf.masterinstanceid, 'i-aaaaaa') + self.assertEqual(jf.hadoopversion, '1.0.3') + self.assertEqual(jf.normalizedinstancehours, '12') + self.assertEqual(jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(jf.instancecount, '3') + self.assertEqual(jf.terminationprotected, 'false') + + self.assertTrue(isinstance(jf.steps, list)) + step = jf.steps[0] + self.assertTrue(isinstance(step, Step)) + self.assertEqual(step.jar, 's3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar') + self.assertEqual(step.name, 'Setup hive') + self.assertEqual(step.actiononfailure, 'TERMINATE_JOB_FLOW') + + self.assertTrue(isinstance(jf.instancegroups, list)) + ig = jf.instancegroups[0] + self.assertTrue(isinstance(ig, InstanceGroup)) + self.assertEqual(ig.creationdatetime, '2014-01-24T01:21:21Z') + self.assertEqual(ig.state, 'ENDED') + self.assertEqual(ig.instancerequestcount, '1') + self.assertEqual(ig.instancetype, 'm1.large') + self.assertEqual(ig.laststatechangereason, 'Job flow terminated') + self.assertEqual(ig.market, 'ON_DEMAND') + 
self.assertEqual(ig.instancegroupid, 'ig-aaaaaa') + self.assertEqual(ig.instancerole, 'MASTER') + self.assertEqual(ig.name, 'Master instance group') + + def test_describe_jobflows_no_args(self): + self.set_http_response(200) + + self.service_connection.describe_jobflows() + + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + }, ignore_params_values=['Version']) + + def test_describe_jobflows_filtered(self): + self.set_http_response(200) + + now = datetime.now() + a_bit_before = datetime.fromtimestamp(time() - 1000) + + self.service_connection.describe_jobflows(states=['WAITING', 'RUNNING'], jobflow_ids=['j-aaaaaa', 'j-aaaaab'], created_after=a_bit_before, created_before=now) + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + 'JobFlowIds.member.1': 'j-aaaaaa', + 'JobFlowIds.member.2': 'j-aaaaab', + 'JobFlowStates.member.1': 'WAITING', + 'JobFlowStates.member.2': 'RUNNING', + 'CreatedAfter': a_bit_before.strftime(boto.utils.ISO8601), + 'CreatedBefore': now.strftime(boto.utils.ISO8601), + }, ignore_params_values=['Version']) + +class TestDescribeJobFlow(DescribeJobFlowsTestBase): + def test_describe_jobflow(self): + self.set_http_response(200) + + response = self.service_connection.describe_jobflow('j-aaaaaa') + self.assertTrue(isinstance(response, JobFlow)) + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + 'JobFlowIds.member.1': 'j-aaaaaa', + }, ignore_params_values=['Version']) diff -Nru python-boto-2.20.1/tests/unit/glacier/test_concurrent.py python-boto-2.29.1/tests/unit/glacier/test_concurrent.py --- python-boto-2.20.1/tests/unit/glacier/test_concurrent.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/glacier/test_concurrent.py 2014-05-30 20:49:34.000000000 +0000 @@ -58,14 +58,12 @@ def setUp(self): super(TestConcurrentUploader, self).setUp() self.stat_patch = mock.patch('os.stat') + self.addCleanup(self.stat_patch.stop) self.stat_mock = self.stat_patch.start() # Give a default value 
for tests that don't care # what the file size is. self.stat_mock.return_value.st_size = 1024 * 1024 * 8 - def tearDown(self): - self.stat_mock = self.stat_patch.start() - def test_calculate_required_part_size(self): self.stat_mock.return_value.st_size = 1024 * 1024 * 8 uploader = ConcurrentUploader(mock.Mock(), 'vault_name') diff -Nru python-boto-2.20.1/tests/unit/glacier/test_job.py python-boto-2.29.1/tests/unit/glacier/test_job.py --- python-boto-2.20.1/tests/unit/glacier/test_job.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/glacier/test_job.py 2014-05-30 20:49:34.000000000 +0000 @@ -19,6 +19,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # +from StringIO import StringIO from tests.unit import unittest import mock @@ -55,6 +56,27 @@ # With validate_checksum set to False, this call succeeds. self.job.get_output(byte_range=(1, 1024), validate_checksum=False) + def test_download_to_fileobj(self): + http_response=mock.Mock(read=mock.Mock(return_value='xyz')) + response = GlacierResponse(http_response, None) + response['TreeHash'] = 'tree_hash' + self.api.get_job_output.return_value = response + fileobj = StringIO() + self.job.archive_size = 3 + with mock.patch('boto.glacier.job.tree_hash_from_str') as t: + t.return_value = 'tree_hash' + self.job.download_to_fileobj(fileobj) + fileobj.seek(0) + self.assertEqual(http_response.read.return_value, fileobj.read()) + + def test_calc_num_chunks(self): + self.job.archive_size = 0 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 0) + self.job.archive_size = 1 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 1) + self.job.archive_size = self.job.DefaultPartSize + 1 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 2) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/glacier/test_layer2.py python-boto-2.29.1/tests/unit/glacier/test_layer2.py --- 
python-boto-2.20.1/tests/unit/glacier/test_layer2.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/glacier/test_layer2.py 2014-05-30 20:49:34.000000000 +0000 @@ -33,6 +33,8 @@ from StringIO import StringIO +from datetime import datetime, tzinfo, timedelta + # Some fixture data from the Glacier docs FIXTURE_VAULT = { "CreationDate" : "2012-02-20T17:01:45.198Z", @@ -208,6 +210,33 @@ self.mock_layer1.delete_archive.assert_called_with("examplevault", "archive") + def test_initiate_job(self): + class UTC(tzinfo): + """UTC""" + + def utcoffset(self, dt): + return timedelta(0) + + def tzname(self, dt): + return "Z" + + def dst(self, dt): + return timedelta(0) + + self.mock_layer1.initiate_job.return_value = {'JobId': 'job-id'} + self.vault.retrieve_inventory(start_date=datetime(2014, 01, 01, tzinfo=UTC()), + end_date=datetime(2014, 01, 02, tzinfo=UTC()), + limit=100) + self.mock_layer1.initiate_job.assert_called_with( + 'examplevault', { + 'Type': 'inventory-retrieval', + 'InventoryRetrievalParameters': { + 'StartDate': '2014-01-01T00:00:00Z', + 'EndDate': '2014-01-02T00:00:00Z', + 'Limit': 100 + } + }) + def test_get_job(self): self.mock_layer1.describe_job.return_value = FIXTURE_ARCHIVE_JOB job = self.vault.get_job( diff -Nru python-boto-2.20.1/tests/unit/iam/test_connection.py python-boto-2.29.1/tests/unit/iam/test_connection.py --- python-boto-2.20.1/tests/unit/iam/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/iam/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -164,3 +164,143 @@ 'SAMLProviderArn': 'arn' }, ignore_params_values=['Version']) + + +class TestCreateRole(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + + /application_abc/component_xyz/ + arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access + S3Access + 
{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]} + 2012-05-08T23:34:01.495Z + AROADBQP57FF2AEXAMPLE + + + + 4a93ceee-9966-11e1-b624-b1aEXAMPLE7c + + + """ + + def test_create_role_default(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["ec2.amazonaws.com"]}}]}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_default_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + response = self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["ec2.amazonaws.com.cn"]}}]}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_string_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role( + 'a_name', + # Historical usage. + assume_role_policy_document='{"hello": "policy"}' + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_data_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role( + 'a_name', + # With plain data, we should dump it for them. 
+ assume_role_policy_document={"hello": "policy"} + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + +class TestGetSigninURL(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + false + + foocorporation + anotherunused + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_default(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/ec2' + ) + + def test_get_signin_url_s3(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url(service='s3') + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/s3' + ) + + def test_get_signin_url_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/ec2' + ) + + +class TestGetSigninURL(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + false + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_no_aliases(self): + self.set_http_response(status_code=200) + + with self.assertRaises(Exception): + self.service_connection.get_signin_url() diff -Nru python-boto-2.20.1/tests/unit/__init__.py python-boto-2.29.1/tests/unit/__init__.py --- python-boto-2.20.1/tests/unit/__init__.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/__init__.py 2014-05-30 20:49:34.000000000 +0000 @@ -4,6 +4,7 @@ import unittest import httplib +import mock from mock import Mock @@ -77,3 +78,36 @@ def default_body(self): return '' + + +class 
MockServiceWithConfigTestCase(AWSMockServiceTestCase): + def setUp(self): + super(MockServiceWithConfigTestCase, self).setUp() + self.environ = {} + self.config = {} + self.config_patch = mock.patch('boto.provider.config.get', + self.get_config) + self.has_config_patch = mock.patch('boto.provider.config.has_option', + self.has_config) + self.environ_patch = mock.patch('os.environ', self.environ) + self.config_patch.start() + self.has_config_patch.start() + self.environ_patch.start() + + def tearDown(self): + self.config_patch.stop() + self.has_config_patch.stop() + self.environ_patch.stop() + + def has_config(self, section_name, key): + try: + self.config[section_name][key] + return True + except KeyError: + return False + + def get_config(self, section_name, key, default=None): + try: + return self.config[section_name][key] + except KeyError: + return None diff -Nru python-boto-2.20.1/tests/unit/mws/test_connection.py python-boto-2.29.1/tests/unit/mws/test_connection.py --- python-boto-2.20.1/tests/unit/mws/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/mws/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -20,7 +20,9 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.mws.connection import MWSConnection, api_call_map +from boto.mws.connection import MWSConnection, api_call_map, destructure_object +from boto.mws.response import (ResponseElement, GetFeedSubmissionListResult, + ResponseFactory) from tests.unit import AWSMockServiceTestCase @@ -48,6 +50,38 @@
    """ + def test_destructure_object(self): + # Test that parsing of user input to Amazon input works. + response = ResponseElement() + response.C = 'four' + response.D = 'five' + inputs = [ + ('A', 'B'), ['B', 'A'], set(['C']), + False, 'String', {'A': 'one', 'B': 'two'}, + response, + {'A': 'one', 'B': 'two', + 'C': [{'D': 'four', 'E': 'five'}, + {'F': 'six', 'G': 'seven'}]}, + ] + outputs = [ + {'Prefix.1': 'A', 'Prefix.2': 'B'}, + {'Prefix.1': 'B', 'Prefix.2': 'A'}, + {'Prefix.1': 'C'}, + {'Prefix': 'false'}, {'Prefix': 'String'}, + {'Prefix.A': 'one', 'Prefix.B': 'two'}, + {'Prefix.C': 'four', 'Prefix.D': 'five'}, + {'Prefix.A': 'one', 'Prefix.B': 'two', + 'Prefix.C.member.1.D': 'four', + 'Prefix.C.member.1.E': 'five', + 'Prefix.C.member.2.F': 'six', + 'Prefix.C.member.2.G': 'seven'} + ] + for user, amazon in zip(inputs, outputs): + result = {} + members = user is inputs[-1] + destructure_object(user, result, prefix='Prefix', members=members) + self.assertEqual(result, amazon) + def test_built_api_call_map(self): # Ensure that the map is populated. # It starts empty, but the decorators should add to it as they're @@ -63,12 +97,38 @@ func = self.service_connection.method_for('GetFeedSubmissionList') # Ensure the right name was found. self.assertTrue(callable(func)) - self.assertEqual(func, self.service_connection.get_feed_submission_list) + ideal = self.service_connection.get_feed_submission_list + self.assertEqual(func, ideal) # Check a non-existent action. 
func = self.service_connection.method_for('NotHereNorThere') self.assertEqual(func, None) + def test_response_factory(self): + connection = self.service_connection + body = self.default_body() + action = 'GetFeedSubmissionList' + parser = connection._response_factory(action, connection=connection) + response = connection._parse_response(parser, 'text/xml', body) + self.assertEqual(response._action, action) + self.assertEqual(response.__class__.__name__, action + 'Response') + self.assertEqual(response._result.__class__, + GetFeedSubmissionListResult) + + class MyResult(GetFeedSubmissionListResult): + _hello = '_world' + + scope = {'GetFeedSubmissionListResult': MyResult} + connection._setup_factories([scope]) + + parser = connection._response_factory(action, connection=connection) + response = connection._parse_response(parser, 'text/xml', body) + self.assertEqual(response._action, action) + self.assertEqual(response.__class__.__name__, action + 'Response') + self.assertEqual(response._result.__class__, MyResult) + self.assertEqual(response._result._hello, '_world') + self.assertEqual(response._result.HasNext, 'true') + def test_get_service_status(self): with self.assertRaises(AttributeError) as err: self.service_connection.get_service_status() diff -Nru python-boto-2.20.1/tests/unit/mws/test_response.py python-boto-2.29.1/tests/unit/mws/test_response.py --- python-boto-2.20.1/tests/unit/mws/test_response.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/mws/test_response.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,4 +1,5 @@ #!/usr/bin/env python +import unittest from boto.mws.connection import MWSConnection from boto.mws.response import (ResponseFactory, ResponseElement, Element, MemberList, ElementList, SimpleList) @@ -29,7 +30,7 @@ Bam """ - obj = self.issue_test('Test9', Test9Result, text) + obj = self.check_issue(Test9Result, text) Item = obj._result.Item useful = lambda x: not x[0].startswith('_') nest = dict(filter(useful, 
Item.Nest.__dict__.items())) @@ -59,7 +60,7 @@ 67 """ - obj = self.issue_test('Test8', Test8Result, text) + obj = self.check_issue(Test8Result, text) self.assertSequenceEqual( map(int, obj._result.Item), range(4), @@ -116,7 +117,7 @@ """ - obj = self.issue_test('Test7', Test7Result, text) + obj = self.check_issue(Test7Result, text) item = obj._result.Item self.assertEqual(len(item), 3) nests = [z.Nest for z in filter(lambda x: x.Nest, item)] @@ -151,7 +152,7 @@ Six """ - obj = self.issue_test('Test6', Test6Result, text) + obj = self.check_issue(Test6Result, text) self.assertSequenceEqual( [e.Value for e in obj._result.Item], ['One', 'Two', 'Six'], @@ -167,7 +168,7 @@ text = """ """ - obj = self.issue_test('Test5', Test5Result, text) + obj = self.check_issue(Test5Result, text) self.assertSequenceEqual(obj._result.Item, []) def test_parsing_missing_member_list(self): @@ -176,7 +177,7 @@ text = """ """ - obj = self.issue_test('Test4', Test4Result, text) + obj = self.check_issue(Test4Result, text) self.assertSequenceEqual(obj._result.Item, []) def test_parsing_element_lists(self): @@ -189,7 +190,7 @@ Baz Zoo """ - obj = self.issue_test('Test1', Test1Result, text) + obj = self.check_issue(Test1Result, text) self.assertTrue(len(obj._result.Item) == 3) elements = lambda x: getattr(x, 'Foo', getattr(x, 'Zip', '?')) elements = map(elements, obj._result.Item) @@ -201,7 +202,7 @@ text = """ """ - obj = self.issue_test('Test2', Test2Result, text) + obj = self.check_issue(Test2Result, text) self.assertEqual(obj._result.Item, []) def test_parsing_simple_lists(self): @@ -213,12 +214,14 @@ Bif Baz """ - obj = self.issue_test('Test3', Test3Result, text) + obj = self.check_issue(Test3Result, text) self.assertSequenceEqual(obj._result.Item, ['Bar', 'Bif', 'Baz']) - def issue_test(self, action, klass, text): - cls = ResponseFactory(action, force=klass) - return self.service_connection._parse_response(cls, text) + def check_issue(self, klass, text): + action = 
klass.__name__[:-len('Result')] + factory = ResponseFactory(scopes=[{klass.__name__: klass}]) + parser = factory(action, connection=self.service_connection) + return self.service_connection._parse_response(parser, 'text/xml', text) if __name__ == "__main__": diff -Nru python-boto-2.20.1/tests/unit/provider/test_provider.py python-boto-2.29.1/tests/unit/provider/test_provider.py --- python-boto-2.20.1/tests/unit/provider/test_provider.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/provider/test_provider.py 2014-05-30 20:49:34.000000000 +0000 @@ -3,8 +3,10 @@ from tests.unit import unittest import mock +import os from boto import provider +from boto.compat import expanduser INSTANCE_CONFIG = { @@ -24,17 +26,25 @@ def setUp(self): self.environ = {} self.config = {} + self.shared_config = {} self.metadata_patch = mock.patch('boto.utils.get_instance_metadata') self.config_patch = mock.patch('boto.provider.config.get', self.get_config) self.has_config_patch = mock.patch('boto.provider.config.has_option', self.has_config) + self.config_object_patch = mock.patch.object( + provider.Config, 'get', self.get_shared_config) + self.has_config_object_patch = mock.patch.object( + provider.Config, 'has_option', self.has_shared_config) self.environ_patch = mock.patch('os.environ', self.environ) self.get_instance_metadata = self.metadata_patch.start() + self.get_instance_metadata.return_value = None self.config_patch.start() self.has_config_patch.start() + self.config_object_patch.start() + self.has_config_object_patch.start() self.environ_patch.start() @@ -42,6 +52,8 @@ self.metadata_patch.stop() self.config_patch.stop() self.has_config_patch.stop() + self.config_object_patch.stop() + self.has_config_object_patch.stop() self.environ_patch.stop() def has_config(self, section_name, key): @@ -57,6 +69,19 @@ except KeyError: return None + def has_shared_config(self, section_name, key): + try: + self.shared_config[section_name][key] + return True + except 
KeyError: + return False + + def get_shared_config(self, section_name, key): + try: + return self.shared_config[section_name][key] + except KeyError: + return None + def test_passed_in_values_are_used(self): p = provider.Provider('aws', 'access_key', 'secret_key', 'security_token') self.assertEqual(p.access_key, 'access_key') @@ -71,6 +96,52 @@ self.assertEqual(p.secret_key, 'env_secret_key') self.assertIsNone(p.security_token) + def test_environment_variable_aws_security_token(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + self.assertEqual(p.security_token, 'env_security_token') + + def test_config_profile_values_are_used(self): + self.config = { + 'profile dev': { + 'aws_access_key_id': 'dev_access_key', + 'aws_secret_access_key': 'dev_secret_key', + }, 'profile prod': { + 'aws_access_key_id': 'prod_access_key', + 'aws_secret_access_key': 'prod_secret_key', + }, 'Credentials': { + 'aws_access_key_id': 'default_access_key', + 'aws_secret_access_key': 'default_secret_key' + } + } + p = provider.Provider('aws', profile_name='prod') + self.assertEqual(p.access_key, 'prod_access_key') + self.assertEqual(p.secret_key, 'prod_secret_key') + q = provider.Provider('aws', profile_name='dev') + self.assertEqual(q.access_key, 'dev_access_key') + self.assertEqual(q.secret_key, 'dev_secret_key') + + def test_config_missing_profile(self): + # None of these default profiles should be loaded! 
+ self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } + self.config = { + 'Credentials': { + 'aws_access_key_id': 'default_access_key', + 'aws_secret_access_key': 'default_secret_key' + } + } + with self.assertRaises(provider.ProfileNotFoundError): + provider.Provider('aws', profile_name='doesntexist') + def test_config_values_are_used(self): self.config = { 'Credentials': { @@ -83,6 +154,19 @@ self.assertEqual(p.secret_key, 'cfg_secret_key') self.assertIsNone(p.security_token) + def test_config_value_security_token_is_used(self): + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + 'aws_security_token': 'cfg_security_token', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'cfg_secret_key') + self.assertEqual(p.security_token, 'cfg_security_token') + def test_keyring_is_used(self): self.config = { 'Credentials': { @@ -110,9 +194,36 @@ if not imported: del sys.modules['keyring'] - def test_env_vars_beat_config_values(self): + def test_passed_in_values_beat_env_vars(self): self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + p = provider.Provider('aws', 'access_key', 'secret_key') + self.assertEqual(p.access_key, 'access_key') + self.assertEqual(p.secret_key, 'secret_key') + self.assertEqual(p.security_token, None) + + def test_env_vars_beat_shared_creds_values(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + 
self.assertEqual(p.secret_key, 'env_secret_key') + self.assertIsNone(p.security_token) + + def test_shared_creds_beat_config_values(self): + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } self.config = { 'Credentials': { 'aws_access_key_id': 'cfg_access_key', @@ -120,9 +231,89 @@ } } p = provider.Provider('aws') + self.assertEqual(p.access_key, 'shared_access_key') + self.assertEqual(p.secret_key, 'shared_secret_key') + self.assertIsNone(p.security_token) + + def test_shared_creds_profile_beats_defaults(self): + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + }, + 'foo': { + 'aws_access_key_id': 'foo_access_key', + 'aws_secret_access_key': 'foo_secret_key', + } + } + p = provider.Provider('aws', profile_name='foo') + self.assertEqual(p.access_key, 'foo_access_key') + self.assertEqual(p.secret_key, 'foo_secret_key') + self.assertIsNone(p.security_token) + + def test_env_profile_loads_profile(self): + self.environ['AWS_PROFILE'] = 'foo' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + }, + 'foo': { + 'aws_access_key_id': 'shared_access_key_foo', + 'aws_secret_access_key': 'shared_secret_key_foo', + } + } + self.config = { + 'profile foo': { + 'aws_access_key_id': 'cfg_access_key_foo', + 'aws_secret_access_key': 'cfg_secret_key_foo', + }, + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'shared_access_key_foo') + self.assertEqual(p.secret_key, 'shared_secret_key_foo') + self.assertIsNone(p.security_token) + + self.shared_config = {} + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key_foo') + self.assertEqual(p.secret_key, 'cfg_secret_key_foo') + 
self.assertIsNone(p.security_token) + + def test_env_vars_security_token_beats_config_values(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + 'aws_security_token': 'shared_security_token', + } + } + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + 'aws_security_token': 'cfg_security_token', + } + } + p = provider.Provider('aws') self.assertEqual(p.access_key, 'env_access_key') self.assertEqual(p.secret_key, 'env_secret_key') - self.assertIsNone(p.security_token) + self.assertEqual(p.security_token, 'env_security_token') + + self.environ.clear() + p = provider.Provider('aws') + self.assertEqual(p.security_token, 'shared_security_token') + + self.shared_config.clear() + p = provider.Provider('aws') + self.assertEqual(p.security_token, 'cfg_security_token') def test_metadata_server_credentials(self): self.get_instance_metadata.return_value = INSTANCE_CONFIG @@ -135,7 +326,7 @@ 'meta-data/iam/security-credentials/') def test_refresh_credentials(self): - now = datetime.now() + now = datetime.utcnow() first_expiration = (now + timedelta(seconds=10)).strftime( "%Y-%m-%dT%H:%M:%SZ") credentials = { @@ -182,6 +373,51 @@ timeout=4.0, num_retries=10, data='meta-data/iam/security-credentials/') + def test_provider_google(self): + self.environ['GS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['GS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.shared_config = { + 'default': { + 'gs_access_key_id': 'shared_access_key', + 'gs_secret_access_key': 'shared_secret_key', + } + } + self.config = { + 'Credentials': { + 'gs_access_key_id': 'cfg_access_key', + 'gs_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('google') + 
self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + + self.environ.clear() + p = provider.Provider('google') + self.assertEqual(p.access_key, 'shared_access_key') + self.assertEqual(p.secret_key, 'shared_secret_key') + + self.shared_config.clear() + p = provider.Provider('google') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'cfg_secret_key') + + @mock.patch('os.path.exists', return_value=True) + @mock.patch.object(provider.Config, 'load_from_path') + def test_shared_config_loading(self, load_from_path, exists): + provider.Provider('aws') + path = os.path.join(expanduser('~'), '.aws', 'credentials') + exists.assert_called_once_with(path) + load_from_path.assert_called_once_with(path) + + exists.reset_mock() + load_from_path.reset_mock() + + provider.Provider('google') + path = os.path.join(expanduser('~'), '.google', 'credentials') + exists.assert_called_once_with(path) + load_from_path.assert_called_once_with(path) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/rds/test_connection.py python-boto-2.29.1/tests/unit/rds/test_connection.py --- python-boto-2.20.1/tests/unit/rds/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/rds/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -28,7 +28,9 @@ from boto.rds import RDSConnection from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership from boto.rds.parametergroup import ParameterGroup +from boto.rds.logfile import LogFile, LogFileObject +import xml.sax.saxutils as saxutils class TestRDSConnection(AWSMockServiceTestCase): connection_class = RDSConnection @@ -550,6 +552,187 @@ self.assertEqual(options.engine_name, 'oracle-se1') self.assertEqual(options.major_engine_version, '11.2') +class TestRDSLogFile(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSLogFile, self).setUp() + + def 
default_body(self): + return """ + + + + + 1364403600000 + error/mysql-error-running.log + 0 + + + 1364338800000 + error/mysql-error-running.log.0 + 0 + + + 1364342400000 + error/mysql-error-running.log.1 + 0 + + + 1364346000000 + error/mysql-error-running.log.2 + 0 + + + 1364349600000 + error/mysql-error-running.log.3 + 0 + + + 1364405700000 + error/mysql-error.log + 0 + + + + + d70fb3b3-9704-11e2-a0db-871552e0ef19 + + + """ + + def test_get_all_logs_simple(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_logs('db1') + + self.assert_request_parameters({ + 'Action': 'DescribeDBLogFiles', + 'DBInstanceIdentifier': 'db1', + }, ignore_params_values=['Version']) + + self.assertEqual(len(response), 6) + self.assertTrue(isinstance(response[0], LogFile)) + self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log') + self.assertEqual(response[0].last_written, '1364403600000') + self.assertEqual(response[0].size, '0') + + def test_get_all_logs_filtered(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_logs('db_instance_1', max_records=100, marker='error/mysql-error.log', file_size=2000000, filename_contains='error', file_last_written=12345678) + + self.assert_request_parameters({ + 'Action': 'DescribeDBLogFiles', + 'DBInstanceIdentifier': 'db_instance_1', + 'MaxRecords': 100, + 'Marker': 'error/mysql-error.log', + 'FileSize': 2000000, + 'FilenameContains': 'error', + 'FileLastWritten': 12345678, + }, ignore_params_values=['Version']) + + self.assertEqual(len(response), 6) + self.assertTrue(isinstance(response[0], LogFile)) + self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log') + self.assertEqual(response[0].last_written, '1364403600000') + self.assertEqual(response[0].size, '0') + + +class TestRDSLogFileDownload(AWSMockServiceTestCase): + connection_class = RDSConnection + logfile_sample = """ +??2014-01-26 23:59:00.01 spid54 Microsoft SQL Server 
2012 - 11.0.2100.60 (X64) + + Feb 10 2012 19:39:15 + + Copyright (c) Microsoft Corporation + + Web Edition (64-bit) on Windows NT 6.1 <X64> (Build 7601: Service Pack 1) (Hypervisor) + + + +2014-01-26 23:59:00.01 spid54 (c) Microsoft Corporation. + +2014-01-26 23:59:00.01 spid54 All rights reserved. + +2014-01-26 23:59:00.01 spid54 Server process ID is 2976. + +2014-01-26 23:59:00.01 spid54 System Manufacturer: 'Xen', System Model: 'HVM domU'. + +2014-01-26 23:59:00.01 spid54 Authentication mode is MIXED. + +2014-01-26 23:59:00.01 spid54 Logging SQL Server messages in file 'D:\RDSDBDATA\Log\ERROR'. + +2014-01-26 23:59:00.01 spid54 The service account is 'WORKGROUP\AMAZONA-NUQUUMV$'. This is an informational message; no user action is required. + +2014-01-26 23:59:00.01 spid54 The error log has been reinitialized. See the previous log for older entries. + +2014-01-27 00:00:56.42 spid25s This instance of SQL Server has been using a process ID of 2976 since 10/21/2013 2:16:50 AM (local) 10/21/2013 2:16:50 AM (UTC). This is an informational message only; no user action is required. + +2014-01-27 09:35:15.43 spid71 I/O is frozen on database model. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid72 I/O is frozen on database msdb. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid74 I/O is frozen on database rdsadmin. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid73 I/O is frozen on database master. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:25.57 spid73 I/O was resumed on database master. No user action is required. + +2014-01-27 09:35:25.57 spid74 I/O was resumed on database rdsadmin. No user action is required. 
+ +2014-01-27 09:35:25.57 spid71 I/O was resumed on database model. No user action is required. + +2014-01-27 09:35:25.57 spid72 I/O was resumed on database msdb. No user action is required. + """ + + def setUp(self): + super(TestRDSLogFileDownload, self).setUp() + + def default_body(self): + return """ + + + 0:4485 + %s + false + + + 27143615-87ae-11e3-acc9-fb64b157268e + + + """ % self.logfile_sample + + def test_single_download(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_log_file('db1', 'foo.log') + + self.assertTrue(isinstance(response, LogFileObject)) + self.assertEqual(response.marker, '0:4485') + self.assertEqual(response.dbinstance_id, 'db1') + self.assertEqual(response.log_filename, 'foo.log') + + self.assertEqual(response.data, saxutils.unescape(self.logfile_sample)) + + self.assert_request_parameters({ + 'Action': 'DownloadDBLogFilePortion', + 'DBInstanceIdentifier': 'db1', + 'LogFileName': 'foo.log', + }, ignore_params_values=['Version']) + + def test_multi_args(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_log_file('db1', 'foo.log', marker='0:4485', number_of_lines=10) + + self.assertTrue(isinstance(response, LogFileObject)) + + self.assert_request_parameters({ + 'Action': 'DownloadDBLogFilePortion', + 'DBInstanceIdentifier': 'db1', + 'Marker': '0:4485', + 'NumberOfLines': 10, + 'LogFileName': 'foo.log', + }, ignore_params_values=['Version']) + class TestRDSOptionGroupOptions(AWSMockServiceTestCase): connection_class = RDSConnection diff -Nru python-boto-2.20.1/tests/unit/rds2/test_connection.py python-boto-2.29.1/tests/unit/rds2/test_connection.py --- python-boto-2.20.1/tests/unit/rds2/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/rds2/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,209 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.securitygroup import SecurityGroup +from boto.rds2.layer1 import RDSConnection + + +class TestRDS2Connection(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDS2Connection, self).setUp() + + def default_body(self): + return """{ + "DescribeDBInstancesResponse": { + "DescribeDBInstancesResult": { + "DBInstances": [{ + "DBInstance": { + "Iops": 2000, + "BackupRetentionPeriod": 1, + "MultiAZ": false, + "DBInstanceStatus": "backing-up", + "DBInstanceIdentifier": "mydbinstance2", + "PreferredBackupWindow": "10:30-11:00", + "PreferredMaintenanceWindow": "wed:06:30-wed:07:00", + "OptionGroupMembership": { + "OptionGroupName": "default:mysql-5-5", + "Status": "in-sync" + }, + "AvailabilityZone": "us-west-2b", + "ReadReplicaDBInstanceIdentifiers": null, + "Engine": "mysql", + "PendingModifiedValues": null, + "LicenseModel": "general-public-license", + "DBParameterGroups": [{ + "DBParameterGroup": { + "ParameterApplyStatus": "in-sync", + "DBParameterGroupName": "default.mysql5.5" + } + }], + "Endpoint": { + "Port": 3306, + "Address": "mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com" + }, + "EngineVersion": "5.5.27", + "DBSecurityGroups": [{ + "DBSecurityGroup": { + "Status": "active", + "DBSecurityGroupName": "default" + } + }], + "VpcSecurityGroups": [{ + "VpcSecurityGroupMembership": { + "VpcSecurityGroupId": "sg-1", + "Status": "active" + } + }], + "DBName": "mydb2", + "AutoMinorVersionUpgrade": true, + "InstanceCreateTime": "2012-10-03T22:01:51.047Z", + "AllocatedStorage": 200, + "DBInstanceClass": "db.m1.large", + "MasterUsername": "awsuser", + "StatusInfos": [{ + "DBInstanceStatusInfo": { + "Message": null, + "Normal": true, + "Status": "replicating", + "StatusType": "read replication" + } + }], + "DBSubnetGroup": { + "VpcId": "990524496922", + "SubnetGroupStatus": "Complete", + "DBSubnetGroupDescription": "My 
modified DBSubnetGroup", + "DBSubnetGroupName": "mydbsubnetgroup", + "Subnets": [{ + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-7c5b4115", + "SubnetAvailabilityZone": { + "Name": "us-east-1c" + } + }, + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-7b5b4112", + "SubnetAvailabilityZone": { + "Name": "us-east-1b" + } + }, + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-3ea6bd57", + "SubnetAvailabilityZone": { + "Name": "us-east-1d" + } + } + }] + } + } + }] + } + } + }""" + + def test_describe_db_instances(self): + self.set_http_response(status_code=200) + response = self.service_connection.describe_db_instances('instance_id') + self.assertEqual(len(response), 1) + self.assert_request_parameters({ + 'Action': 'DescribeDBInstances', + 'ContentType': 'JSON', + 'DBInstanceIdentifier': 'instance_id', + }, ignore_params_values=['Version']) + db = response['DescribeDBInstancesResponse']\ + ['DescribeDBInstancesResult']['DBInstances'][0]\ + ['DBInstance'] + self.assertEqual(db['DBInstanceIdentifier'], 'mydbinstance2') + self.assertEqual(db['InstanceCreateTime'], '2012-10-03T22:01:51.047Z') + self.assertEqual(db['Engine'], 'mysql') + self.assertEqual(db['DBInstanceStatus'], 'backing-up') + self.assertEqual(db['AllocatedStorage'], 200) + self.assertEqual(db['Endpoint']['Port'], 3306) + self.assertEqual(db['DBInstanceClass'], 'db.m1.large') + self.assertEqual(db['MasterUsername'], 'awsuser') + self.assertEqual(db['AvailabilityZone'], 'us-west-2b') + self.assertEqual(db['BackupRetentionPeriod'], 1) + self.assertEqual(db['PreferredBackupWindow'], '10:30-11:00') + self.assertEqual(db['PreferredMaintenanceWindow'], + 'wed:06:30-wed:07:00') + self.assertEqual(db['MultiAZ'], False) + self.assertEqual(db['Iops'], 2000) + self.assertEqual(db['PendingModifiedValues'], None) + self.assertEqual( + db['DBParameterGroups'][0]['DBParameterGroup']\ + ['DBParameterGroupName'], + 'default.mysql5.5' + ) + 
self.assertEqual( + db['DBSecurityGroups'][0]['DBSecurityGroup']['DBSecurityGroupName'], + 'default' + ) + self.assertEqual( + db['DBSecurityGroups'][0]['DBSecurityGroup']['Status'], + 'active' + ) + self.assertEqual(len(db['StatusInfos']), 1) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Message'], + None + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Normal'], + True + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Status'], + 'replicating' + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['StatusType'], + 'read replication' + ) + self.assertEqual( + db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']['Status'], + 'active' + ) + self.assertEqual( + db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']\ + ['VpcSecurityGroupId'], + 'sg-1' + ) + self.assertEqual(db['LicenseModel'], 'general-public-license') + self.assertEqual(db['EngineVersion'], '5.5.27') + self.assertEqual(db['AutoMinorVersionUpgrade'], True) + self.assertEqual( + db['DBSubnetGroup']['DBSubnetGroupName'], + 'mydbsubnetgroup' + ) + + +if __name__ == '__main__': + unittest.main() + diff -Nru python-boto-2.20.1/tests/unit/route53/test_connection.py python-boto-2.29.1/tests/unit/route53/test_connection.py --- python-boto-2.20.1/tests/unit/route53/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/route53/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -21,15 +21,20 @@ # IN THE SOFTWARE. 
# import mock +import re +import xml.dom.minidom from boto.exception import BotoServerError from boto.route53.connection import Route53Connection from boto.route53.exception import DNSServerError +from boto.route53.record import ResourceRecordSets, Record +from boto.route53.zone import Zone +from nose.plugins.attrib import attr from tests.unit import unittest from tests.unit import AWSMockServiceTestCase - +@attr(route53=True) class TestRoute53Connection(AWSMockServiceTestCase): connection_class = Route53Connection @@ -82,3 +87,378 @@ # Unpatch. self.service_connection._retry_handler = orig_retry + +@attr(route53=True) +class TestCreateZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreateZoneRoute53, self).setUp() + + def default_body(self): + return """ + + + /hostedzone/Z11111 + example.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + + 2 + + + /change/C1111111111111 + PENDING + 2014-02-02T10:19:29.928Z + + + + ns-100.awsdns-01.com + ns-1000.awsdns-01.co.uk + ns-1000.awsdns-01.org + ns-900.awsdns-01.net + + + + """ + + def test_create_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.create_zone("example.com.") + + self.assertTrue(isinstance(response, Zone)) + self.assertEqual(response.id, "Z11111") + self.assertEqual(response.name, "example.com.") + + def test_create_hosted_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.create_hosted_zone("example.com.", "my_ref", "this is a comment") + + self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'], + ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net']) + +@attr(route53=True) +class TestGetZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetZoneRoute53, self).setUp() + + def default_body(self): + return """ + + + + /hostedzone/Z1111 + 
example2.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + 3 + + + /hostedzone/Z2222 + example1.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef + + 6 + + + /hostedzone/Z3333 + example.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeeg + + 6 + + + false + 100 + + """ + + def test_list_zones(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_all_hosted_zones() + + domains = ['example2.com.', 'example1.com.', 'example.com.'] + print response['ListHostedZonesResponse']['HostedZones'][0] + for d in response['ListHostedZonesResponse']['HostedZones']: + print "Removing: %s" % d['Name'] + domains.remove(d['Name']) + + self.assertEqual(domains, []) + + def test_get_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_zone('example.com.') + + self.assertTrue(isinstance(response, Zone)) + self.assertEqual(response.name, "example.com.") + +@attr(route53=True) +class TestGetHostedZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetHostedZoneRoute53, self).setUp() + + def default_body(self): + return """ + + + /hostedzone/Z1111 + example.com. 
+ aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + 3 + + + + ns-1000.awsdns-40.org + ns-200.awsdns-30.com + ns-900.awsdns-50.net + ns-1000.awsdns-00.co.uk + + + +""" + + def test_list_zones(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_hosted_zone("Z1111") + + self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Id'], '/hostedzone/Z1111') + self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Name'], 'example.com.') + self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'], + ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk']) + +@attr(route53=True) +class TestGetAllRRSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetAllRRSetsRoute53, self).setUp() + + def default_body(self): + return """ + + + + test.example.com. + A + 60 + + + 10.0.0.1 + + + + + www.example.com. + A + 60 + + + 10.0.0.2 + + + + + us-west-2-evaluate-health.example.com. + A + latency-example-us-west-2-evaluate-health + us-west-2 + + ABCDEFG123456 + true + example-123456-evaluate-health.us-west-2.elb.amazonaws.com. + + + + us-west-2-no-evaluate-health.example.com. + A + latency-example-us-west-2-no-evaluate-health + us-west-2 + + ABCDEFG567890 + false + example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com. + + + + failover.example.com. 
+ A + failover-primary + PRIMARY + 60 + + + 10.0.0.4 + + + + + false + 100 + + """ + + def test_get_all_rr_sets(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_rrsets("Z1111", "A", "example.com.") + + self.assertEqual(self.actual_request.path, + "/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.") + + self.assertTrue(isinstance(response, ResourceRecordSets)) + self.assertEqual(response.hosted_zone_id, "Z1111") + self.assertTrue(isinstance(response[0], Record)) + + self.assertTrue(response[0].name, "test.example.com.") + self.assertTrue(response[0].ttl, "60") + self.assertTrue(response[0].type, "A") + + evaluate_record = response[2] + self.assertEqual(evaluate_record.name, 'us-west-2-evaluate-health.example.com.') + self.assertEqual(evaluate_record.type, 'A') + self.assertEqual(evaluate_record.identifier, 'latency-example-us-west-2-evaluate-health') + self.assertEqual(evaluate_record.region, 'us-west-2') + self.assertEqual(evaluate_record.alias_hosted_zone_id, 'ABCDEFG123456') + self.assertTrue(evaluate_record.alias_evaluate_target_health) + self.assertEqual(evaluate_record.alias_dns_name, 'example-123456-evaluate-health.us-west-2.elb.amazonaws.com.') + evaluate_xml = evaluate_record.to_xml() + self.assertTrue('true' in evaluate_xml) + + no_evaluate_record = response[3] + self.assertEqual(no_evaluate_record.name, 'us-west-2-no-evaluate-health.example.com.') + self.assertEqual(no_evaluate_record.type, 'A') + self.assertEqual(no_evaluate_record.identifier, 'latency-example-us-west-2-no-evaluate-health') + self.assertEqual(no_evaluate_record.region, 'us-west-2') + self.assertEqual(no_evaluate_record.alias_hosted_zone_id, 'ABCDEFG567890') + self.assertFalse(no_evaluate_record.alias_evaluate_target_health) + self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.') + no_evaluate_xml = no_evaluate_record.to_xml() + self.assertTrue('false' in no_evaluate_xml) 
+ + failover_record = response[4] + self.assertEqual(failover_record.name, 'failover.example.com.') + self.assertEqual(failover_record.type, 'A') + self.assertEqual(failover_record.identifier, 'failover-primary') + self.assertEqual(failover_record.failover, 'PRIMARY') + self.assertEqual(failover_record.ttl, '60') + +@attr(route53=True) +class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestChangeResourceRecordSetsRoute53, self).setUp() + + def default_body(self): + return """ + + + /change/C1111111111111 + PENDING + 2014-05-05T10:11:12.123Z + + + """ + + def test_record_commit(self): + rrsets = ResourceRecordSets(self.service_connection) + rrsets.add_change_record('CREATE', Record('vanilla.example.com', 'A', 60, ['1.2.3.4'])) + rrsets.add_change_record('CREATE', Record('alias.example.com', 'AAAA', alias_hosted_zone_id='Z123OTHER', alias_dns_name='target.other', alias_evaluate_target_health=True)) + rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1')) + rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1')) + rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary')) + + changes_xml = rrsets.to_xml() + + # the whitespacing doesn't match exactly, so we'll pretty print and drop all new lines + # not the best, but + actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml()) + expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(""" + + + None + + + CREATE + + vanilla.example.com + A + 60 + + + 1.2.3.4 + + + + + + CREATE + + alias.example.com + AAAA + + Z123OTHER + target.other + true + + + + + CREATE + + wrr.example.com + CNAME + weight-1 + 10 + 60 + + + cname.target + + + + + + CREATE + 
+ lbr.example.com + TXT + region-1 + us-west-2 + 60 + + + text record + + + + + + CREATE + + failover.example.com + A + primary + PRIMARY + 60 + + + 2.2.2.2 + + + hc-1234 + + + + + + """).toprettyxml()) + + # Note: the alias XML should not include the TTL, even if it's specified in the object model + self.assertEqual(actual_xml, expected_xml) + diff -Nru python-boto-2.20.1/tests/unit/s3/test_bucket.py python-boto-2.29.1/tests/unit/s3/test_bucket.py --- python-boto-2.20.1/tests/unit/s3/test_bucket.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/s3/test_bucket.py 2014-05-30 20:49:34.000000000 +0000 @@ -4,8 +4,13 @@ from tests.unit import unittest from tests.unit import AWSMockServiceTestCase +from boto.exception import BotoClientError from boto.s3.connection import S3Connection from boto.s3.bucket import Bucket +from boto.s3.deletemarker import DeleteMarker +from boto.s3.key import Key +from boto.s3.multipart import MultiPartUpload +from boto.s3.prefix import Prefix class TestS3Bucket(AWSMockServiceTestCase): @@ -108,20 +113,85 @@ 'initial=1&bar=%E2%98%83&max-keys=0&foo=true&some-other=thing' ) - @patch.object(Bucket, 'get_all_keys') - def test_bucket_copy_key_no_validate(self, mock_get_all_keys): + @patch.object(S3Connection, 'head_bucket') + def test_bucket_copy_key_no_validate(self, mock_head_bucket): self.set_http_response(status_code=200) bucket = self.service_connection.create_bucket('mybucket') - self.assertFalse(mock_get_all_keys.called) + self.assertFalse(mock_head_bucket.called) self.service_connection.get_bucket('mybucket', validate=True) - self.assertTrue(mock_get_all_keys.called) + self.assertTrue(mock_head_bucket.called) - mock_get_all_keys.reset_mock() - self.assertFalse(mock_get_all_keys.called) + mock_head_bucket.reset_mock() + self.assertFalse(mock_head_bucket.called) try: bucket.copy_key('newkey', 'srcbucket', 'srckey', preserve_acl=True) except: # Will throw because of empty response. 
pass - self.assertFalse(mock_get_all_keys.called) + self.assertFalse(mock_head_bucket.called) + + @patch.object(Bucket, '_get_all') + def test_bucket_encoding(self, mock_get_all): + self.set_http_response(status_code=200) + bucket = self.service_connection.get_bucket('mybucket') + + # First, without the encoding. + mock_get_all.reset_mock() + bucket.get_all_keys() + mock_get_all.assert_called_with( + [ + ('Contents', Key), + ('CommonPrefixes', Prefix) + ], '', None + ) + + # Now the variants with the encoding. + mock_get_all.reset_mock() + bucket.get_all_keys(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Contents', Key), + ('CommonPrefixes', Prefix) + ], '', None, + encoding_type='url' + ) + + mock_get_all.reset_mock() + bucket.get_all_versions(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Version', Key), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker), + ], 'versions', None, + encoding_type='url' + ) + + mock_get_all.reset_mock() + bucket.get_all_multipart_uploads(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Upload', MultiPartUpload), + ('CommonPrefixes', Prefix) + ], 'uploads', None, + encoding_type='url' + ) + + @patch.object(Bucket, 'get_all_keys') + @patch.object(Bucket, '_get_key_internal') + def test_bucket_get_key_no_validate(self, mock_gki, mock_gak): + self.set_http_response(status_code=200) + bucket = self.service_connection.get_bucket('mybucket') + key = bucket.get_key('mykey', validate=False) + + self.assertEqual(len(mock_gki.mock_calls), 0) + self.assertTrue(isinstance(key, Key)) + self.assertEqual(key.name, 'mykey') + + with self.assertRaises(BotoClientError): + bucket.get_key( + 'mykey', + version_id='something', + validate=False + ) diff -Nru python-boto-2.20.1/tests/unit/s3/test_connection.py python-boto-2.29.1/tests/unit/s3/test_connection.py --- python-boto-2.20.1/tests/unit/s3/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ 
python-boto-2.29.1/tests/unit/s3/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,178 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import mock +import time + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase +from tests.unit import MockServiceWithConfigTestCase + +from boto.s3.connection import S3Connection, HostRequiredError +from boto.s3.connection import S3ResponseError, Bucket + + +class TestSignatureAlteration(AWSMockServiceTestCase): + connection_class = S3Connection + + def test_unchanged(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['s3'] + ) + + def test_switched(self): + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.cn-north-1.amazonaws.com.cn' + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4-s3'] + ) + + +class TestSigV4HostError(MockServiceWithConfigTestCase): + connection_class = S3Connection + + def test_historical_behavior(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['s3'] + ) + self.assertEqual(self.service_connection.host, 's3.amazonaws.com') + + def test_sigv4_opt_in(self): + # Switch it at the config, so we can check to see how the host is + # handled. + self.config = { + 's3': { + 'use-sigv4': True, + } + } + + with self.assertRaises(HostRequiredError): + # No host+SigV4 == KABOOM + self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more' + ) + + # Ensure passing a ``host`` still works. 
+ conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.cn-north-1.amazonaws.com.cn' + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4-s3'] + ) + self.assertEqual( + conn.host, + 's3.cn-north-1.amazonaws.com.cn' + ) + + + +class TestUnicodeCallingFormat(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + return """ + + + bcaf1ffd86f461ca5fb16fd081034f + webfile + + + + quotes + 2006-02-03T16:45:09.000Z + + + samples + 2006-02-03T16:41:58.000Z + + +""" + + def create_service_connection(self, **kwargs): + kwargs['calling_format'] = u'boto.s3.connection.OrdinaryCallingFormat' + return super(TestUnicodeCallingFormat, + self).create_service_connection(**kwargs) + + def test_unicode_calling_format(self): + self.set_http_response(status_code=200) + self.service_connection.get_all_buckets() + + +class TestHeadBucket(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + # HEAD requests always have an empty body. 
+ return "" + + def test_head_bucket_success(self): + self.set_http_response(status_code=200) + buck = self.service_connection.head_bucket('my-test-bucket') + self.assertTrue(isinstance(buck, Bucket)) + self.assertEqual(buck.name, 'my-test-bucket') + + def test_head_bucket_forbidden(self): + self.set_http_response(status_code=403) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('cant-touch-this') + + err = cm.exception + self.assertEqual(err.status, 403) + self.assertEqual(err.error_code, 'AccessDenied') + self.assertEqual(err.message, 'Access Denied') + + def test_head_bucket_notfound(self): + self.set_http_response(status_code=404) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('totally-doesnt-exist') + + err = cm.exception + self.assertEqual(err.status, 404) + self.assertEqual(err.error_code, 'NoSuchBucket') + self.assertEqual(err.message, 'The specified bucket does not exist') + + def test_head_bucket_other(self): + self.set_http_response(status_code=405) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('you-broke-it') + + err = cm.exception + self.assertEqual(err.status, 405) + # We don't have special-cases for this error status. 
+ self.assertEqual(err.error_code, None) + self.assertEqual(err.message, '') + + +if __name__ == "__main__": + unittest.main() diff -Nru python-boto-2.20.1/tests/unit/ses/test_identity.py python-boto-2.29.1/tests/unit/ses/test_identity.py --- python-boto-2.20.1/tests/unit/ses/test_identity.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/ses/test_identity.py 2014-05-30 20:49:34.000000000 +0000 @@ -78,5 +78,94 @@ tokens[2]) +class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase): + connection_class = SESConnection + + def setUp(self): + super(TestSESSetIdentityNotificationTopic, self).setUp() + + def default_body(self): + return """ + + + 299f4af4-b72a-11e1-901f-1fbd90e8104f + + """ + + def test_ses_set_identity_notification_topic_bounce(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_notification_topic( + identity='user@example.com', + notification_type='Bounce', + sns_topic='arn:aws:sns:us-east-1:123456789012:example') + + response = response['SetIdentityNotificationTopicResponse'] + result = response['SetIdentityNotificationTopicResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + def test_ses_set_identity_notification_topic_complaint(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_notification_topic( + identity='user@example.com', + notification_type='Complaint', + sns_topic='arn:aws:sns:us-east-1:123456789012:example') + + response = response['SetIdentityNotificationTopicResponse'] + result = response['SetIdentityNotificationTopicResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + +class TestSESSetIdentityFeedbackForwardingEnabled(AWSMockServiceTestCase): + connection_class = SESConnection + + def setUp(self): + super(TestSESSetIdentityFeedbackForwardingEnabled, self).setUp() + + def default_body(self): + return """ + + + 
299f4af4-b72a-11e1-901f-1fbd90e8104f + + """ + + def test_ses_set_identity_feedback_forwarding_enabled_true(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_feedback_forwarding_enabled( + identity='user@example.com', + forwarding_enabled=True) + + response = response['SetIdentityFeedbackForwardingEnabledResponse'] + result = response['SetIdentityFeedbackForwardingEnabledResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + def test_ses_set_identity_notification_topic_enabled_false(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_feedback_forwarding_enabled( + identity='user@example.com', + forwarding_enabled=False) + + response = response['SetIdentityFeedbackForwardingEnabledResponse'] + result = response['SetIdentityFeedbackForwardingEnabledResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/sns/test_connection.py python-boto-2.29.1/tests/unit/sns/test_connection.py --- python-boto-2.20.1/tests/unit/sns/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/sns/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -225,6 +225,17 @@ 'MessageStructure': 'json', }, ignore_params_values=['Version', 'ContentType']) + def test_publish_with_utf8_message(self): + self.set_http_response(status_code=200) + subject = message = u'We \u2665 utf-8'.encode('utf-8') + self.service_connection.publish('topic', message, subject) + self.assert_request_parameters({ + 'Action': 'Publish', + 'TopicArn': 'topic', + 'Subject': subject, + 'Message': message, + }, ignore_params_values=['Version', 'ContentType']) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/sqs/test_connection.py python-boto-2.29.1/tests/unit/sqs/test_connection.py --- 
python-boto-2.20.1/tests/unit/sqs/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/sqs/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -1,5 +1,6 @@ #!/usr/bin/env python # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -26,6 +27,8 @@ from boto.sqs.connection import SQSConnection from boto.sqs.regioninfo import SQSRegionInfo +from boto.sqs.message import RawMessage +from boto.sqs.queue import Queue class SQSAuthParams(AWSMockServiceTestCase): @@ -104,7 +107,172 @@ assert 'QueueOwnerAWSAccountId' in self.actual_request.params.keys() self.assertEquals(self.actual_request.params['QueueOwnerAWSAccountId'], '599169622985') - + + +class SQSMessageAttributesParsing(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + + This is a test + +eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo= + ce114e4501d2f4e2dcea3e17b546f339 + + Count + + Number + 1 + + + + Foo + + String + Bar + + + 7049431b-e5f6-430b-93c4-ded53864d02b + 324758f82d026ac6ec5b31a3b192d1e3 + + + + 73f978f2-400b-5460-8d38-3316e39e79c6 + +""" + + def test_message_attribute_response(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + message = self.service_connection.receive_message(queue)[0] + + self.assertEqual(message.get_body(), 'This is a test') + self.assertEqual(message.id, 
'7049431b-e5f6-430b-93c4-ded53864d02b') + self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339') + self.assertEqual(message.md5_message_attributes, + '324758f82d026ac6ec5b31a3b192d1e3') + + mattributes = message.message_attributes + self.assertEqual(len(mattributes.keys()), 2) + self.assertEqual(mattributes['Count']['data_type'], 'Number') + self.assertEqual(mattributes['Foo']['string_value'], 'Bar') + + +class SQSSendMessageAttributes(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + fafb00f5732ab283681e124bf8747ed1 + + + 3ae8f24a165a8cedc005670c81a27295 + + + 5fea7756-0ea4-451a-a703-a558b933e274 + + + + + 27daac76-34dd-47df-bd01-1f6e873584a0 + + + +""" + + def test_send_message_attributes(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + self.service_connection.send_message(queue, 'Test message', + message_attributes={ + 'name1': { + 'data_type': 'String', + 'string_value': 'Bob' + }, + 'name2': { + 'data_type': 'Number', + 'string_value': '1' + } + }) + + self.assert_request_parameters({ + 'Action': 'SendMessage', + 'MessageAttribute.1.Name': 'name2', + 'MessageAttribute.1.Value.DataType': 'Number', + 'MessageAttribute.1.Value.StringValue': '1', + 'MessageAttribute.2.Name': 'name1', + 'MessageAttribute.2.Value.DataType': 'String', + 'MessageAttribute.2.Value.StringValue': 'Bob', + 'MessageBody': 'Test message', + 'Version': '2012-11-05' + }) + + +class SQSSendBatchMessageAttributes(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + test_msg_001 + 0a5231c7-8bff-4955-be2e-8dc7c50a25fa + 0e024d309850c78cba5eabbeff7cae71 + + + test_msg_002 + 15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9 + 7fb8146a82f95e0af155278f406862c2 + 295c5fa15a51aae6884d1d7c1d99ca50 + + + + ca1ad5d0-8271-408b-8d0f-1351bf547e74 + + +""" + + def 
test_send_message_attributes(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + + message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String', + 'string_value': 'foo'}}) + message2 = (2, 'Message 2', 0, {'name2': {'data_type': 'Number', + 'string_value': '1'}}) + + self.service_connection.send_message_batch(queue, (message1, message2)) + + self.assert_request_parameters({ + 'Action': 'SendMessageBatch', + 'SendMessageBatchRequestEntry.1.DelaySeconds': 0, + 'SendMessageBatchRequestEntry.1.Id': 1, + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String', + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1', + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo', + 'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1', + 'SendMessageBatchRequestEntry.2.DelaySeconds': 0, + 'SendMessageBatchRequestEntry.2.Id': 2, + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number', + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2', + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1', + 'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2', + 'Version': '2012-11-05' + }) + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/sqs/test_message.py python-boto-2.29.1/tests/unit/sqs/test_message.py --- python-boto-2.20.1/tests/unit/sqs/test_message.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/sqs/test_message.py 2014-05-30 20:49:34.000000000 +0000 @@ -23,6 +23,7 @@ from boto.sqs.message import MHMessage from boto.sqs.message import RawMessage +from boto.sqs.bigmessage import BigMessage from boto.exception import SQSDecodeError @@ -62,5 +63,32 @@ self.assertEquals(message.id, sample_value) self.assertEquals(message.receipt_handle, sample_value) + +class TestBigMessage(unittest.TestCase): + + def 
test_s3url_parsing(self): + msg = BigMessage() + # Try just a bucket name + bucket, key = msg._get_bucket_key('s3://foo') + self.assertEquals(bucket, 'foo') + self.assertEquals(key, None) + # Try just a bucket name with trailing "/" + bucket, key = msg._get_bucket_key('s3://foo/') + self.assertEquals(bucket, 'foo') + self.assertEquals(key, None) + # Try a bucket and a key + bucket, key = msg._get_bucket_key('s3://foo/bar') + self.assertEquals(bucket, 'foo') + self.assertEquals(key, 'bar') + # Try a bucket and a key with "/" + bucket, key = msg._get_bucket_key('s3://foo/bar/fie/baz') + self.assertEquals(bucket, 'foo') + self.assertEquals(key, 'bar/fie/baz') + # Try it with no s3:// prefix + with self.assertRaises(SQSDecodeError) as context: + bucket, key = msg._get_bucket_key('foo/bar') + + + if __name__ == '__main__': unittest.main() diff -Nru python-boto-2.20.1/tests/unit/sts/test_connection.py python-boto-2.29.1/tests/unit/sts/test_connection.py --- python-boto-2.20.1/tests/unit/sts/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/sts/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -26,6 +26,18 @@ from tests.unit import AWSMockServiceTestCase +class TestSecurityToken(AWSMockServiceTestCase): + connection_class = STSConnection + + def create_service_connection(self, **kwargs): + kwargs['security_token'] = 'token' + + return super(TestSecurityToken, self).create_service_connection(**kwargs) + + def test_security_token(self): + self.assertEqual('token', + self.service_connection.provider.security_token) + class TestSTSConnection(AWSMockServiceTestCase): connection_class = STSConnection @@ -63,6 +75,29 @@ ignore_params_values=['Timestamp', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version']) + self.assertEqual(response.credentials.access_key, 'accesskey') + self.assertEqual(response.credentials.secret_key, 'secretkey') + self.assertEqual(response.credentials.session_token, 'session_token') + 
self.assertEqual(response.user.arn, 'arn:role') + self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession') + + def test_assume_role_with_mfa(self): + self.set_http_response(status_code=200) + response = self.service_connection.assume_role( + 'arn:role', + 'mysession', + mfa_serial_number='GAHT12345678', + mfa_token='abc123' + ) + self.assert_request_parameters( + {'Action': 'AssumeRole', + 'RoleArn': 'arn:role', + 'RoleSessionName': 'mysession', + 'SerialNumber': 'GAHT12345678', + 'TokenCode': 'abc123'}, + ignore_params_values=['Timestamp', 'AWSAccessKeyId', + 'SignatureMethod', 'SignatureVersion', + 'Version']) self.assertEqual(response.credentials.access_key, 'accesskey') self.assertEqual(response.credentials.secret_key, 'secretkey') self.assertEqual(response.credentials.session_token, 'session_token') diff -Nru python-boto-2.20.1/tests/unit/swf/test_layer2_base.py python-boto-2.29.1/tests/unit/swf/test_layer2_base.py --- python-boto-2.20.1/tests/unit/swf/test_layer2_base.py 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/swf/test_layer2_base.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,31 @@ +import boto.swf.layer2 +from boto.swf.layer2 import SWFBase +from tests.unit import unittest +from mock import Mock + + +MOCK_DOMAIN = 'Mock' +MOCK_ACCESS_KEY = 'inheritable access key' +MOCK_SECRET_KEY = 'inheritable secret key' +MOCK_REGION = 'Mock Region' + + +class TestBase(unittest.TestCase): + """ + Test for SWFBase. 
+ """ + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + self.swf_base = SWFBase( + domain=MOCK_DOMAIN, aws_access_key_id=MOCK_ACCESS_KEY, + aws_secret_access_key=MOCK_SECRET_KEY, region=MOCK_REGION + ) + + def test_instantiation(self): + self.assertEquals(MOCK_DOMAIN, self.swf_base.domain) + self.assertEquals(MOCK_ACCESS_KEY, self.swf_base.aws_access_key_id) + self.assertEquals(MOCK_SECRET_KEY, + self.swf_base.aws_secret_access_key) + self.assertEquals(MOCK_REGION, self.swf_base.region) + boto.swf.layer2.Layer1.assert_called_with( + MOCK_ACCESS_KEY, MOCK_SECRET_KEY, region=MOCK_REGION) diff -Nru python-boto-2.20.1/tests/unit/test_connection.py python-boto-2.29.1/tests/unit/test_connection.py --- python-boto-2.20.1/tests/unit/test_connection.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/test_connection.py 2014-05-30 20:49:34.000000000 +0000 @@ -82,7 +82,7 @@ proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): self.region = region if host is None: host = self.region.endpoint @@ -93,7 +93,8 @@ host, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) class TestAWSAuthConnection(unittest.TestCase): def test_get_path(self): diff -Nru python-boto-2.20.1/tests/unit/test_endpoints.json python-boto-2.29.1/tests/unit/test_endpoints.json --- python-boto-2.20.1/tests/unit/test_endpoints.json 1970-01-01 00:00:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/test_endpoints.json 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "ec2": { + "test-1": "ec2.test-1.amazonaws.com" + } +} diff -Nru python-boto-2.20.1/tests/unit/test_regioninfo.py python-boto-2.29.1/tests/unit/test_regioninfo.py --- python-boto-2.20.1/tests/unit/test_regioninfo.py 1970-01-01 00:00:00.000000000 +0000 +++ 
python-boto-2.29.1/tests/unit/test_regioninfo.py 2014-05-30 20:49:34.000000000 +0000 @@ -0,0 +1,146 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import os +from tests.unit import unittest + +import boto +from boto.compat import json +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints +from boto.regioninfo import load_regions, get_regions + + +class TestRegionInfo(object): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + self.connection = connection + self.name = name + self.endpoint = endpoint + self.connection_cls = connection_cls + + +class FakeConn(object): + pass + + +class TestEndpointLoading(unittest.TestCase): + def setUp(self): + super(TestEndpointLoading, self).setUp() + + def test_load_endpoint_json(self): + endpoints = load_endpoint_json(boto.ENDPOINTS_PATH) + self.assertTrue('ec2' in endpoints) + self.assertEqual( + endpoints['ec2']['us-east-1'], + 'ec2.us-east-1.amazonaws.com' + ) + + def test_merge_endpoints(self): + defaults = { + 'ec2': { + 'us-east-1': 'ec2.us-east-1.amazonaws.com', + 'us-west-1': 'ec2.us-west-1.amazonaws.com', + } + } + additions = { + # Top-level addition. + 's3': { + 'us-east-1': 's3.amazonaws.com' + }, + 'ec2': { + # Overwrite. This doesn't exist, just test data. + 'us-east-1': 'ec2.auto-resolve.amazonaws.com', + # Deep addition. + 'us-west-2': 'ec2.us-west-2.amazonaws.com', + } + } + + endpoints = merge_endpoints(defaults, additions) + self.assertEqual(endpoints, { + 'ec2': { + 'us-east-1': 'ec2.auto-resolve.amazonaws.com', + 'us-west-1': 'ec2.us-west-1.amazonaws.com', + 'us-west-2': 'ec2.us-west-2.amazonaws.com', + }, + 's3': { + 'us-east-1': 's3.amazonaws.com' + } + }) + + def test_load_regions(self): + # Just the defaults. + endpoints = load_regions() + self.assertTrue('us-east-1' in endpoints['ec2']) + self.assertFalse('test-1' in endpoints['ec2']) + + # With ENV overrides. 
+ os.environ['BOTO_ENDPOINTS'] = os.path.join( + os.path.dirname(__file__), + 'test_endpoints.json' + ) + self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS') + endpoints = load_regions() + self.assertTrue('us-east-1' in endpoints['ec2']) + self.assertTrue('test-1' in endpoints['ec2']) + self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com') + + def test_get_regions(self): + # With defaults. + ec2_regions = get_regions('ec2') + self.assertEqual(len(ec2_regions), 10) + west_2 = None + + for region_info in ec2_regions: + if region_info.name == 'us-west-2': + west_2 = region_info + break + + self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!") + self.assertTrue(isinstance(west_2, RegionInfo)) + self.assertEqual(west_2.name, 'us-west-2') + self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com') + self.assertEqual(west_2.connection_cls, None) + + def test_get_regions_overrides(self): + ec2_regions = get_regions( + 'ec2', + region_cls=TestRegionInfo, + connection_cls=FakeConn + ) + self.assertEqual(len(ec2_regions), 10) + west_2 = None + + for region_info in ec2_regions: + if region_info.name == 'us-west-2': + west_2 = region_info + break + + self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!") + self.assertFalse(isinstance(west_2, RegionInfo)) + self.assertTrue(isinstance(west_2, TestRegionInfo)) + self.assertEqual(west_2.name, 'us-west-2') + self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com') + self.assertEqual(west_2.connection_cls, FakeConn) + + +if __name__ == '__main__': + unittest.main() diff -Nru python-boto-2.20.1/tests/unit/utils/test_utils.py python-boto-2.29.1/tests/unit/utils/test_utils.py --- python-boto-2.20.1/tests/unit/utils/test_utils.py 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/tests/unit/utils/test_utils.py 2014-05-30 20:49:34.000000000 +0000 @@ -26,8 +26,9 @@ import hashlib import hmac - import mock +import thread +import time import boto.utils from 
boto.utils import Password @@ -39,6 +40,21 @@ from boto.compat import json + +@unittest.skip("http://bugs.python.org/issue7980") +class TestThreadImport(unittest.TestCase): + def test_strptime(self): + def f(): + for m in xrange(1, 13): + for d in xrange(1,29): + boto.utils.parse_ts('2013-01-01T00:00:00Z') + + for _ in xrange(10): + thread.start_new_thread(f, ()) + + time.sleep(3) + + class TestPassword(unittest.TestCase): """Test basic password functionality""" diff -Nru python-boto-2.20.1/.travis.yml python-boto-2.29.1/.travis.yml --- python-boto-2.20.1/.travis.yml 2013-12-13 20:46:00.000000000 +0000 +++ python-boto-2.29.1/.travis.yml 2014-05-30 20:49:34.000000000 +0000 @@ -4,5 +4,5 @@ - "2.7" before_install: - sudo apt-get install swig -install: pip install --use-mirrors -r requirements.txt +install: pip install --allow-all-external -r requirements.txt script: python tests/test.py unit